1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
43 * Define macro to log: Mailbox command x%x cannot issue Data
44 * This allows multiple uses of lpfc_msgBlk0311
45 * w/o perturbing log msg utility.
47 #define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
48 lpfc_printf_log(phba, \
51 "%d:0311 Mailbox command x%x cannot issue " \
52 "Data: x%x x%x x%x\n", \
60 /* There are only four IOCB completion types. */
61 typedef enum _lpfc_iocb_type {
69 lpfc_sli_get_iocbq(struct lpfc_hba * phba)
71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 struct lpfc_iocbq * iocbq = NULL;
74 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
79 lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);
84 * Clean all volatile data fields, preserve iotag and node struct.
86 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
91 * Translate the iocb command to an iocb command type used to decide the final
92 * disposition of each completed IOCB.
95 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
97 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
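/*
 * Classify the command into one of the completion types above:
 * driver-initiated (solicited) commands, abort/close operations, and
 * unsolicited receives.  For example, a completed CMD_FCP_IREAD64_CR
 * is solicited and gets matched back to its originating iocbq, while
 * CMD_RCV_ELS_REQ64_CX carries an incoming ELS frame and is routed to
 * the unsolicited handlers.
 */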
99 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
103 case CMD_XMIT_SEQUENCE_CR:
104 case CMD_XMIT_SEQUENCE_CX:
105 case CMD_XMIT_BCAST_CN:
106 case CMD_XMIT_BCAST_CX:
107 case CMD_ELS_REQUEST_CR:
108 case CMD_ELS_REQUEST_CX:
109 case CMD_CREATE_XRI_CR:
110 case CMD_CREATE_XRI_CX:
112 case CMD_XMIT_ELS_RSP_CX:
114 case CMD_FCP_IWRITE_CR:
115 case CMD_FCP_IWRITE_CX:
116 case CMD_FCP_IREAD_CR:
117 case CMD_FCP_IREAD_CX:
118 case CMD_FCP_ICMND_CR:
119 case CMD_FCP_ICMND_CX:
120 case CMD_FCP_TSEND_CX:
121 case CMD_FCP_TRSP_CX:
122 case CMD_FCP_TRECEIVE_CX:
123 case CMD_FCP_AUTO_TRSP_CX:
124 case CMD_ADAPTER_MSG:
125 case CMD_ADAPTER_DUMP:
126 case CMD_XMIT_SEQUENCE64_CR:
127 case CMD_XMIT_SEQUENCE64_CX:
128 case CMD_XMIT_BCAST64_CN:
129 case CMD_XMIT_BCAST64_CX:
130 case CMD_ELS_REQUEST64_CR:
131 case CMD_ELS_REQUEST64_CX:
132 case CMD_FCP_IWRITE64_CR:
133 case CMD_FCP_IWRITE64_CX:
134 case CMD_FCP_IREAD64_CR:
135 case CMD_FCP_IREAD64_CX:
136 case CMD_FCP_ICMND64_CR:
137 case CMD_FCP_ICMND64_CX:
138 case CMD_FCP_TSEND64_CX:
139 case CMD_FCP_TRSP64_CX:
140 case CMD_FCP_TRECEIVE64_CX:
141 case CMD_GEN_REQUEST64_CR:
142 case CMD_GEN_REQUEST64_CX:
143 case CMD_XMIT_ELS_RSP64_CX:
144 type = LPFC_SOL_IOCB;
146 case CMD_ABORT_XRI_CN:
147 case CMD_ABORT_XRI_CX:
148 case CMD_CLOSE_XRI_CN:
149 case CMD_CLOSE_XRI_CX:
150 case CMD_XRI_ABORTED_CX:
151 case CMD_ABORT_MXRI64_CN:
152 type = LPFC_ABORT_IOCB;
154 case CMD_RCV_SEQUENCE_CX:
155 case CMD_RCV_ELS_REQ_CX:
156 case CMD_RCV_SEQUENCE64_CX:
157 case CMD_RCV_ELS_REQ64_CX:
158 type = LPFC_UNSOL_IOCB;
161 type = LPFC_UNKNOWN_IOCB;
169 lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
171 struct lpfc_sli *psli = &phba->sli;
172 MAILBOX_t *pmbox = &pmb->mb;
175 for (i = 0; i < psli->num_rings; i++) {
176 phba->hba_state = LPFC_INIT_MBX_CMDS;
177 lpfc_config_ring(phba, i, pmb);
178 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
179 if (rc != MBX_SUCCESS) {
180 lpfc_printf_log(phba,
183 "%d:0446 Adapter failed to init, "
184 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
190 phba->hba_state = LPFC_HBA_ERROR;
198 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
199 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
201 list_add_tail(&piocb->list, &pring->txcmplq);
202 pring->txcmplq_cnt++;
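/*
 * For the ELS ring, the per-HBA ELS timeout timer (els_tmofunc) is
 * also (re)armed below for twice fc_ratov, presumably so an ELS
 * command that never completes is eventually caught by its handler.
 */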
203 if (unlikely(pring->ringno == LPFC_ELS_RING))
204 mod_timer(&phba->els_tmofunc,
205 jiffies + HZ * (phba->fc_ratov << 1));
210 static struct lpfc_iocbq *
211 lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
213 struct list_head *dlp;
214 struct lpfc_iocbq *cmd_iocb;
218 list_remove_head((&pring->txq), cmd_iocb,
222 /* If the first ptr is not equal to the list header,
223 * dequeue the IOCBQ_t and return it.
231 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
233 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
234 uint32_t max_cmd_idx = pring->numCiocb;
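/*
 * The command ring is circular: next_cmdidx is the slot the driver
 * will fill next, while pgp->cmdGetInx (cached in local_getidx) tracks
 * how far the port has consumed.  next_cmdidx wraps at numCiocb, and
 * the ring is treated as full when next_cmdidx reaches the port's get
 * index (re-read from the port before giving up).
 */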
237 if ((pring->next_cmdidx == pring->cmdidx) &&
238 (++pring->next_cmdidx >= max_cmd_idx))
239 pring->next_cmdidx = 0;
241 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
243 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
245 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
247 "%d:0315 Ring %d issue: portCmdGet %d "
248 "is bigger then cmd ring %d\n",
249 phba->brd_no, pring->ringno,
250 pring->local_getidx, max_cmd_idx);
252 phba->hba_state = LPFC_HBA_ERROR;
254 * All error attention handlers are posted to
257 phba->work_ha |= HA_ERATT;
258 phba->work_hs = HS_FFER3;
260 wake_up(phba->work_wait);
265 if (pring->local_getidx == pring->next_cmdidx)
269 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
275 lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
277 struct lpfc_iocbq ** new_arr;
278 struct lpfc_iocbq ** old_arr;
280 struct lpfc_sli *psli = &phba->sli;
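/*
 * Assign the next free iotag and record the iocbq in the sli
 * iocbq_lookup array so a later completion can be matched back by tag.
 * The common case runs under host_lock; when the array is exhausted
 * the lock is dropped, a larger array is allocated
 * (LPFC_IOCBQ_LOOKUP_INCREMENT more entries), the old contents are
 * copied across, and the assignment is retried.
 */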
283 spin_lock_irq(phba->host->host_lock);
284 iotag = psli->last_iotag;
285 if(++iotag < psli->iocbq_lookup_len) {
286 psli->last_iotag = iotag;
287 psli->iocbq_lookup[iotag] = iocbq;
288 spin_unlock_irq(phba->host->host_lock);
289 iocbq->iotag = iotag;
292 else if (psli->iocbq_lookup_len < (0xffff
293 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
294 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
295 spin_unlock_irq(phba->host->host_lock);
296 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
299 memset((char *)new_arr, 0,
300 new_len * sizeof (struct lpfc_iocbq *));
301 spin_lock_irq(phba->host->host_lock);
302 old_arr = psli->iocbq_lookup;
303 if (new_len <= psli->iocbq_lookup_len) {
304 /* highly improbable case */
306 iotag = psli->last_iotag;
307 if(++iotag < psli->iocbq_lookup_len) {
308 psli->last_iotag = iotag;
309 psli->iocbq_lookup[iotag] = iocbq;
310 spin_unlock_irq(phba->host->host_lock);
311 iocbq->iotag = iotag;
314 spin_unlock_irq(phba->host->host_lock);
317 if (psli->iocbq_lookup)
318 memcpy(new_arr, old_arr,
319 ((psli->last_iotag + 1) *
320 sizeof (struct lpfc_iocbq *)));
321 psli->iocbq_lookup = new_arr;
322 psli->iocbq_lookup_len = new_len;
323 psli->last_iotag = iotag;
324 psli->iocbq_lookup[iotag] = iocbq;
325 spin_unlock_irq(phba->host->host_lock);
326 iocbq->iotag = iotag;
331 spin_unlock_irq(phba->host->host_lock);
333 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
334 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
335 phba->brd_no, psli->last_iotag);
341 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
342 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
347 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
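/*
 * Only commands that expect a completion carry their iotag in the
 * on-wire IOCB; anything issued without a completion routine goes out
 * with ulpIoTag 0 and its iocbq is released immediately after the copy
 * below.
 */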
350 * Issue iocb command to adapter
352 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
354 pring->stats.iocb_cmd++;
357 * If there is no completion routine to call, we can release the
358 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
359 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
361 if (nextiocb->iocb_cmpl)
362 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
364 lpfc_sli_release_iocbq(phba, nextiocb);
367 * Let the HBA know what IOCB slot will be the next one the
368 * driver will put a command into.
370 pring->cmdidx = pring->next_cmdidx;
371 writel(pring->cmdidx, phba->MBslimaddr
372 + (SLIMOFF + (pring->ringno * 2)) * 4);
376 lpfc_sli_update_full_ring(struct lpfc_hba * phba,
377 struct lpfc_sli_ring *pring)
379 int ringno = pring->ringno;
381 pring->flag |= LPFC_CALL_RING_AVAILABLE;
386 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
387 * The HBA will tell us when an IOCB entry is available.
389 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
390 readl(phba->CAregaddr); /* flush */
392 pring->stats.iocb_cmd_full++;
396 lpfc_sli_update_ring(struct lpfc_hba * phba,
397 struct lpfc_sli_ring *pring)
399 int ringno = pring->ringno;
402 * Tell the HBA that there is work to do in this ring.
405 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
406 readl(phba->CAregaddr); /* flush */
410 lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
413 struct lpfc_iocbq *nextiocb;
417 * (a) there is anything on the txq to send
419 * (c) link attention events can be processed (fcp ring only)
420 * (d) IOCB processing is not blocked by the outstanding mbox command.
422 if (pring->txq_cnt &&
423 (phba->hba_state > LPFC_LINK_DOWN) &&
424 (pring->ringno != phba->sli.fcp_ring ||
425 phba->sli.sli_flag & LPFC_PROCESS_LA) &&
426 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
428 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
429 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
430 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
433 lpfc_sli_update_ring(phba, pring);
435 lpfc_sli_update_full_ring(phba, pring);
441 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
443 lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
445 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];
447 /* If the ring is active, flag it */
448 if (phba->sli.ring[ringno].cmdringaddr) {
449 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
450 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
452 * Force update of the local copy of cmdGetInx
454 phba->sli.ring[ringno].local_getidx
455 = le32_to_cpu(pgp->cmdGetInx);
456 spin_lock_irq(phba->host->host_lock);
457 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
458 spin_unlock_irq(phba->host->host_lock);
464 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
468 switch (mbxCommand) {
472 case MBX_RUN_BIU_DIAG:
475 case MBX_CONFIG_LINK:
476 case MBX_CONFIG_RING:
478 case MBX_READ_CONFIG:
479 case MBX_READ_RCONFIG:
481 case MBX_READ_STATUS:
485 case MBX_READ_LNK_STAT:
487 case MBX_UNREG_LOGIN:
490 case MBX_DUMP_MEMORY:
491 case MBX_DUMP_CONTEXT:
496 case MBX_DEL_LD_ENTRY:
497 case MBX_RUN_PROGRAM:
502 case MBX_CONFIG_FARP:
505 case MBX_RUN_BIU_DIAG64:
506 case MBX_CONFIG_PORT:
507 case MBX_READ_SPARM64:
509 case MBX_REG_LOGIN64:
511 case MBX_FLASH_WR_ULA:
513 case MBX_LOAD_EXP_ROM:
523 lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
525 wait_queue_head_t *pdone_q;
528 * If pdone_q is empty, the driver thread gave up waiting and
531 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
532 pdone_q = (wait_queue_head_t *) pmboxq->context1;
534 wake_up_interruptible(pdone_q);
539 lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
541 struct lpfc_dmabuf *mp;
545 mp = (struct lpfc_dmabuf *) (pmb->context1);
548 lpfc_mbuf_free(phba, mp->virt, mp->phys);
553 * If a REG_LOGIN succeeded after the node was destroyed or the node
554 * is in re-discovery, the driver needs to clean up the RPI.
556 if (!(phba->fc_flag & FC_UNLOADING) &&
557 (pmb->mb.mbxCommand == MBX_REG_LOGIN64) &&
558 (!pmb->mb.mbxStatus)) {
560 rpi = pmb->mb.un.varWords[0];
561 lpfc_unreg_login(phba, rpi, pmb);
562 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
563 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
564 if (rc != MBX_NOT_FINISHED)
568 mempool_free( pmb, phba->mbox_mem_pool);
573 lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
578 struct lpfc_sli *psli;
580 uint32_t process_next;
583 /* We should only get here if we are in SLI2 mode */
584 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
588 phba->sli.slistat.mbox_event++;
590 /* Get a Mailbox buffer to setup mailbox commands for callback */
591 if ((pmb = phba->sli.mbox_active)) {
593 mbox = &phba->slim2p->mbx;
595 /* First check out the status word */
596 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
598 /* Sanity check to ensure the host owns the mailbox */
599 if (pmbox->mbxOwner != OWN_HOST) {
600 /* Let's try for a while */
601 for (i = 0; i < 10240; i++) {
602 /* First copy command data */
603 lpfc_sli_pcimem_bcopy(mbox, pmbox,
605 if (pmbox->mbxOwner == OWN_HOST)
608 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus <status> */
610 lpfc_printf_log(phba,
613 "%d:0304 Stray Mailbox Interrupt "
614 "mbxCommand x%x mbxStatus x%x\n",
619 spin_lock_irq(phba->host->host_lock);
620 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
621 spin_unlock_irq(phba->host->host_lock);
626 del_timer_sync(&phba->sli.mbox_tmo);
627 phba->work_hba_events &= ~WORKER_MBOX_TMO;
630 * It is a fatal error if an unknown mbox command completes.
632 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
635 /* Unknown mailbox command compl */
636 lpfc_printf_log(phba,
639 "%d:0323 Unknown Mailbox command %x Cmpl\n",
642 phba->hba_state = LPFC_HBA_ERROR;
643 phba->work_hs = HS_FFER3;
644 lpfc_handle_eratt(phba);
648 phba->sli.mbox_active = NULL;
649 if (pmbox->mbxStatus) {
650 phba->sli.slistat.mbox_stat_err++;
651 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
652 /* Mbox cmd cmpl error - RETRYing */
653 lpfc_printf_log(phba,
656 "%d:0305 Mbox cmd cmpl error - "
657 "RETRYing Data: x%x x%x x%x x%x\n",
661 pmbox->un.varWords[0],
663 pmbox->mbxStatus = 0;
664 pmbox->mbxOwner = OWN_HOST;
665 spin_lock_irq(phba->host->host_lock);
666 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
667 spin_unlock_irq(phba->host->host_lock);
668 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
669 if (rc == MBX_SUCCESS)
674 /* Mailbox cmd <cmd> Cmpl <cmpl> */
675 lpfc_printf_log(phba,
678 "%d:0307 Mailbox cmd x%x Cmpl x%p "
679 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
683 *((uint32_t *) pmbox),
684 pmbox->un.varWords[0],
685 pmbox->un.varWords[1],
686 pmbox->un.varWords[2],
687 pmbox->un.varWords[3],
688 pmbox->un.varWords[4],
689 pmbox->un.varWords[5],
690 pmbox->un.varWords[6],
691 pmbox->un.varWords[7]);
693 if (pmb->mbox_cmpl) {
694 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
695 pmb->mbox_cmpl(phba,pmb);
701 process_next = 0; /* by default don't loop */
702 spin_lock_irq(phba->host->host_lock);
703 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
705 /* Process next mailbox command if there is one */
706 if ((pmb = lpfc_mbox_get(phba))) {
707 spin_unlock_irq(phba->host->host_lock);
708 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
709 if (rc == MBX_NOT_FINISHED) {
710 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
711 pmb->mbox_cmpl(phba,pmb);
713 continue; /* loop back */
716 spin_unlock_irq(phba->host->host_lock);
717 /* Turn on IOCB processing */
718 for (i = 0; i < phba->sli.num_rings; i++)
719 lpfc_sli_turn_on_ring(phba, i);
722 } while (process_next);
727 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
728 struct lpfc_iocbq *saveq)
736 irsp = &(saveq->iocb);
737 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
738 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
743 (WORD5 *) & (saveq->iocb.un.
745 Rctl = w5p->hcsw.Rctl;
746 Type = w5p->hcsw.Type;
748 /* Firmware Workaround */
749 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
750 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
753 w5p->hcsw.Rctl = Rctl;
754 w5p->hcsw.Type = Type;
757 /* Unsolicited Responses */
758 if (pring->prt[0].profile) {
759 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
760 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
764 /* We must search, based on rctl / type
765 for the right routine */
766 for (i = 0; i < pring->num_mask;
768 if ((pring->prt[i].rctl ==
772 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
773 (pring->prt[i].lpfc_sli_rcv_unsol_event)
774 (phba, pring, saveq);
781 /* Unexpected Rctl / Type received */
782 /* Ring <ringno> handler: unexpected
783 Rctl <Rctl> Type <Type> received */
784 lpfc_printf_log(phba,
787 "%d:0313 Ring %d handler: unexpected Rctl x%x "
788 "Type x%x received \n",
797 static struct lpfc_iocbq *
798 lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
799 struct lpfc_sli_ring * pring,
800 struct lpfc_iocbq * prspiocb)
802 struct lpfc_iocbq *cmd_iocb = NULL;
805 iotag = prspiocb->iocb.ulpIoTag;
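/*
 * The iotag from the response indexes straight into sli.iocbq_lookup
 * (populated by lpfc_sli_next_iotag), so the originating command iocbq
 * is found without walking the txcmplq; it only has to be unlinked
 * from that list.
 */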
807 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
808 cmd_iocb = phba->sli.iocbq_lookup[iotag];
809 list_del(&cmd_iocb->list);
810 pring->txcmplq_cnt--;
814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
815 "%d:0317 iotag x%x is out off "
816 "range: max iotag x%x wd0 x%x\n",
818 phba->sli.last_iotag,
819 *(((uint32_t *) &prspiocb->iocb) + 7));
824 lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
825 struct lpfc_iocbq *saveq)
827 struct lpfc_iocbq * cmdiocbp;
831 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
832 spin_lock_irqsave(phba->host->host_lock, iflag);
833 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
835 if (cmdiocbp->iocb_cmpl) {
837 * Post all ELS completions to the worker thread.
838 * All other are passed to the completion callback.
840 if (pring->ringno == LPFC_ELS_RING) {
841 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
842 cmdiocbp->iocb_flag &=
843 ~LPFC_DRIVER_ABORTED;
844 saveq->iocb.ulpStatus =
846 saveq->iocb.un.ulpWord[4] =
849 spin_unlock_irqrestore(phba->host->host_lock,
851 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
852 spin_lock_irqsave(phba->host->host_lock, iflag);
855 spin_unlock_irqrestore(phba->host->host_lock,
857 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
858 spin_lock_irqsave(phba->host->host_lock, iflag);
861 lpfc_sli_release_iocbq(phba, cmdiocbp);
864 * Unknown initiating command based on the response iotag.
865 * This could be the case on the ELS ring because of
868 if (pring->ringno != LPFC_ELS_RING) {
870 * Ring <ringno> handler: unexpected completion IoTag
873 lpfc_printf_log(phba,
876 "%d:0322 Ring %d handler: unexpected "
877 "completion IoTag x%x Data: x%x x%x x%x x%x\n",
880 saveq->iocb.ulpIoTag,
881 saveq->iocb.ulpStatus,
882 saveq->iocb.un.ulpWord[4],
883 saveq->iocb.ulpCommand,
884 saveq->iocb.ulpContext);
888 spin_unlock_irqrestore(phba->host->host_lock, iflag);
892 static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
893 struct lpfc_sli_ring * pring)
895 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
897 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
898 * rsp ring <portRspMax>
900 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
901 "%d:0312 Ring %d handler: portRspPut %d "
902 "is bigger then rsp ring %d\n",
903 phba->brd_no, pring->ringno,
904 le32_to_cpu(pgp->rspPutInx),
907 phba->hba_state = LPFC_HBA_ERROR;
910 * All error attention handlers are posted to
913 phba->work_ha |= HA_ERATT;
914 phba->work_hs = HS_FFER3;
916 wake_up(phba->work_wait);
921 void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
923 struct lpfc_sli * psli = &phba->sli;
924 struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
926 IOCB_t *entry = NULL;
927 struct lpfc_iocbq *cmdiocbq = NULL;
928 struct lpfc_iocbq rspiocbq;
929 struct lpfc_pgp *pgp;
931 uint32_t portRspPut, portRspMax;
933 uint32_t rsp_cmpl = 0;
934 void __iomem *to_slim;
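/*
 * Drain any completed FCP ring responses without waiting for an
 * interrupt.  This appears to serve the cfg_poll /
 * ENABLE_FCP_RING_POLLING mode referenced in the fast-ring handler
 * below: each response entry is copied out of the ring, classified,
 * and completed much like the interrupt path, but without taking
 * host_lock.
 */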
937 pring->stats.iocb_event++;
939 /* The driver assumes SLI-2 mode */
940 pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
943 * The next available response entry should never exceed the maximum
944 * entries. If it does, treat it as an adapter hardware error.
946 portRspMax = pring->numRiocb;
947 portRspPut = le32_to_cpu(pgp->rspPutInx);
948 if (unlikely(portRspPut >= portRspMax)) {
949 lpfc_sli_rsp_pointers_error(phba, pring);
954 while (pring->rspidx != portRspPut) {
956 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
958 if (++pring->rspidx >= portRspMax)
961 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
962 (uint32_t *) &rspiocbq.iocb,
964 irsp = &rspiocbq.iocb;
965 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
966 pring->stats.iocb_rsp++;
969 if (unlikely(irsp->ulpStatus)) {
970 /* Rsp ring <ringno> error: IOCB */
971 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
972 "%d:0326 Rsp Ring %d error: IOCB Data: "
973 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
974 phba->brd_no, pring->ringno,
981 *(((uint32_t *) irsp) + 6),
982 *(((uint32_t *) irsp) + 7));
986 case LPFC_ABORT_IOCB:
989 * Idle exchange closed via ABTS from port. No iocb
990 * resources need to be recovered.
992 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
993 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
994 "%d:0314 IOCB cmd 0x%x"
995 " processed. Skipping"
996 " completion", phba->brd_no,
1001 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1003 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1004 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1009 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1010 char adaptermsg[LPFC_MAX_ADPTMSG];
1011 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1012 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1014 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1015 phba->brd_no, adaptermsg);
1017 /* Unknown IOCB command */
1018 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1019 "%d:0321 Unknown IOCB command "
1020 "Data: x%x, x%x x%x x%x x%x\n",
1031 * The response IOCB has been processed. Update the ring
1032 * pointer in SLIM. If the port response put pointer has not
1033 * been updated, sync the pgp->rspPutInx and fetch the new port
1034 * response put pointer.
1036 to_slim = phba->MBslimaddr +
1037 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1038 writeb(pring->rspidx, to_slim);
1040 if (pring->rspidx == portRspPut)
1041 portRspPut = le32_to_cpu(pgp->rspPutInx);
1044 ha_copy = readl(phba->HAregaddr);
1045 ha_copy >>= (LPFC_FCP_RING * 4);
1047 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1048 pring->stats.iocb_rsp_full++;
1049 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1050 writel(status, phba->CAregaddr);
1051 readl(phba->CAregaddr);
1053 if ((ha_copy & HA_R0CE_RSP) &&
1054 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1055 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1056 pring->stats.iocb_cmd_empty++;
1058 /* Force update of the local copy of cmdGetInx */
1059 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1060 lpfc_sli_resume_iocb(phba, pring);
1062 if ((pring->lpfc_sli_cmd_available))
1063 (pring->lpfc_sli_cmd_available) (phba, pring);
1071 * This routine presumes LPFC_FCP_RING handling and doesn't bother
1072 * to check it explicitly.
1075 lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1076 struct lpfc_sli_ring * pring, uint32_t mask)
1078 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1079 IOCB_t *irsp = NULL;
1080 IOCB_t *entry = NULL;
1081 struct lpfc_iocbq *cmdiocbq = NULL;
1082 struct lpfc_iocbq rspiocbq;
1084 uint32_t portRspPut, portRspMax;
1086 lpfc_iocb_type type;
1087 unsigned long iflag;
1088 uint32_t rsp_cmpl = 0;
1089 void __iomem *to_slim;
1091 spin_lock_irqsave(phba->host->host_lock, iflag);
1092 pring->stats.iocb_event++;
1095 * The next available response entry should never exceed the maximum
1096 * entries. If it does, treat it as an adapter hardware error.
1098 portRspMax = pring->numRiocb;
1099 portRspPut = le32_to_cpu(pgp->rspPutInx);
1100 if (unlikely(portRspPut >= portRspMax)) {
1101 lpfc_sli_rsp_pointers_error(phba, pring);
1102 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1107 while (pring->rspidx != portRspPut) {
1109 * Fetch an entry off the ring and copy it into a local data
1110 * structure. The copy involves a byte-swap since the
1111 * network byte order and pci byte orders are different.
1113 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1115 if (++pring->rspidx >= portRspMax)
1118 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1119 (uint32_t *) &rspiocbq.iocb,
1121 INIT_LIST_HEAD(&(rspiocbq.list));
1122 irsp = &rspiocbq.iocb;
1124 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1125 pring->stats.iocb_rsp++;
1128 if (unlikely(irsp->ulpStatus)) {
1129 /* Rsp ring <ringno> error: IOCB */
1130 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1131 "%d:0336 Rsp Ring %d error: IOCB Data: "
1132 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1133 phba->brd_no, pring->ringno,
1134 irsp->un.ulpWord[0], irsp->un.ulpWord[1],
1135 irsp->un.ulpWord[2], irsp->un.ulpWord[3],
1136 irsp->un.ulpWord[4], irsp->un.ulpWord[5],
1137 *(((uint32_t *) irsp) + 6),
1138 *(((uint32_t *) irsp) + 7));
1142 case LPFC_ABORT_IOCB:
1145 * Idle exchange closed via ABTS from port. No iocb
1146 * resources need to be recovered.
1148 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1149 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1150 "%d:0333 IOCB cmd 0x%x"
1151 " processed. Skipping"
1152 " completion\n", phba->brd_no,
1157 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1159 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1160 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1161 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1164 spin_unlock_irqrestore(
1165 phba->host->host_lock, iflag);
1166 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1168 spin_lock_irqsave(phba->host->host_lock,
1173 case LPFC_UNSOL_IOCB:
1174 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1175 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1176 spin_lock_irqsave(phba->host->host_lock, iflag);
1179 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1180 char adaptermsg[LPFC_MAX_ADPTMSG];
1181 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1182 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1184 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1185 phba->brd_no, adaptermsg);
1187 /* Unknown IOCB command */
1188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1189 "%d:0334 Unknown IOCB command "
1190 "Data: x%x, x%x x%x x%x x%x\n",
1191 phba->brd_no, type, irsp->ulpCommand,
1192 irsp->ulpStatus, irsp->ulpIoTag,
1199 * The response IOCB has been processed. Update the ring
1200 * pointer in SLIM. If the port response put pointer has not
1201 * been updated, sync the pgp->rspPutInx and fetch the new port
1202 * response put pointer.
1204 to_slim = phba->MBslimaddr +
1205 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1206 writel(pring->rspidx, to_slim);
1208 if (pring->rspidx == portRspPut)
1209 portRspPut = le32_to_cpu(pgp->rspPutInx);
1212 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1213 pring->stats.iocb_rsp_full++;
1214 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1215 writel(status, phba->CAregaddr);
1216 readl(phba->CAregaddr);
1218 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1219 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1220 pring->stats.iocb_cmd_empty++;
1222 /* Force update of the local copy of cmdGetInx */
1223 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1224 lpfc_sli_resume_iocb(phba, pring);
1226 if ((pring->lpfc_sli_cmd_available))
1227 (pring->lpfc_sli_cmd_available) (phba, pring);
1231 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1237 lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1238 struct lpfc_sli_ring * pring, uint32_t mask)
1241 IOCB_t *irsp = NULL;
1242 struct lpfc_iocbq *rspiocbp = NULL;
1243 struct lpfc_iocbq *next_iocb;
1244 struct lpfc_iocbq *cmdiocbp;
1245 struct lpfc_iocbq *saveq;
1246 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1247 uint8_t iocb_cmd_type;
1248 lpfc_iocb_type type;
1249 uint32_t status, free_saveq;
1250 uint32_t portRspPut, portRspMax;
1252 unsigned long iflag;
1253 void __iomem *to_slim;
1255 spin_lock_irqsave(phba->host->host_lock, iflag);
1256 pring->stats.iocb_event++;
1259 * The next available response entry should never exceed the maximum
1260 * entries. If it does, treat it as an adapter hardware error.
1262 portRspMax = pring->numRiocb;
1263 portRspPut = le32_to_cpu(pgp->rspPutInx);
1264 if (portRspPut >= portRspMax) {
1266 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1267 * rsp ring <portRspMax>
1269 lpfc_printf_log(phba,
1272 "%d:0303 Ring %d handler: portRspPut %d "
1273 "is bigger then rsp ring %d\n",
1275 pring->ringno, portRspPut, portRspMax);
1277 phba->hba_state = LPFC_HBA_ERROR;
1278 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1280 phba->work_hs = HS_FFER3;
1281 lpfc_handle_eratt(phba);
1287 while (pring->rspidx != portRspPut) {
1289 * Build a completion list and call the appropriate handler.
1290 * The process is to get the next available response iocb, get
1291 * a free iocb from the list, copy the response data into the
1292 * free iocb, insert to the continuation list, and update the
1293 * next response index to slim. This process makes response
1294 * iocb's in the ring available to DMA as fast as possible but
1295 * pays a penalty for a copy operation. Since the iocb is
1296 * only 32 bytes, this penalty is considered small relative to
1297 * the PCI reads for register values and a slim write. When
1298 * the ulpLe field is set, the entire Command has been received.
1301 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1302 rspiocbp = lpfc_sli_get_iocbq(phba);
1303 if (rspiocbp == NULL) {
1304 printk(KERN_ERR "%s: out of buffers! Failing "
1305 "completion.\n", __FUNCTION__);
1309 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
1310 irsp = &rspiocbp->iocb;
1312 if (++pring->rspidx >= portRspMax)
1315 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
1317 writel(pring->rspidx, to_slim);
1319 if (list_empty(&(pring->iocb_continueq))) {
1320 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1322 list_add_tail(&rspiocbp->list,
1323 &(pring->iocb_continueq));
1326 pring->iocb_continueq_cnt++;
1329 * By default, the driver expects to free all resources
1330 * associated with this iocb completion.
1333 saveq = list_get_first(&pring->iocb_continueq,
1334 struct lpfc_iocbq, list);
1335 irsp = &(saveq->iocb);
1336 list_del_init(&pring->iocb_continueq);
1337 pring->iocb_continueq_cnt = 0;
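/*
 * saveq now heads the complete response: for a multi-IOCB sequence the
 * remaining entries stay chained through saveq->list and are released
 * further down once the type-specific handler has consumed them.
 */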
1339 pring->stats.iocb_rsp++;
1341 if (irsp->ulpStatus) {
1342 /* Rsp ring <ringno> error: IOCB */
1343 lpfc_printf_log(phba,
1346 "%d:0328 Rsp Ring %d error: IOCB Data: "
1347 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1350 irsp->un.ulpWord[0],
1351 irsp->un.ulpWord[1],
1352 irsp->un.ulpWord[2],
1353 irsp->un.ulpWord[3],
1354 irsp->un.ulpWord[4],
1355 irsp->un.ulpWord[5],
1356 *(((uint32_t *) irsp) + 6),
1357 *(((uint32_t *) irsp) + 7));
1361 * Fetch the IOCB command type and call the correct
1362 * completion routine. Solicited and Unsolicited
1363 * IOCBs on the ELS ring get freed back to the
1364 * lpfc_iocb_list by the discovery kernel thread.
1366 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1367 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1368 if (type == LPFC_SOL_IOCB) {
1369 spin_unlock_irqrestore(phba->host->host_lock,
1371 rc = lpfc_sli_process_sol_iocb(phba, pring,
1373 spin_lock_irqsave(phba->host->host_lock, iflag);
1374 } else if (type == LPFC_UNSOL_IOCB) {
1375 spin_unlock_irqrestore(phba->host->host_lock,
1377 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1379 spin_lock_irqsave(phba->host->host_lock, iflag);
1380 } else if (type == LPFC_ABORT_IOCB) {
1381 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1383 lpfc_sli_iocbq_lookup(phba, pring,
1385 /* Call the specified completion
1387 if (cmdiocbp->iocb_cmpl) {
1388 spin_unlock_irqrestore(
1389 phba->host->host_lock,
1391 (cmdiocbp->iocb_cmpl) (phba,
1394 phba->host->host_lock,
1397 lpfc_sli_release_iocbq(phba,
1400 } else if (type == LPFC_UNKNOWN_IOCB) {
1401 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1403 char adaptermsg[LPFC_MAX_ADPTMSG];
1405 memset(adaptermsg, 0,
1407 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1409 dev_warn(&((phba->pcidev)->dev),
1411 phba->brd_no, adaptermsg);
1413 /* Unknown IOCB command */
1414 lpfc_printf_log(phba,
1417 "%d:0335 Unknown IOCB command "
1418 "Data: x%x x%x x%x x%x\n",
1428 if (!list_empty(&saveq->list)) {
1429 list_for_each_entry_safe(rspiocbp,
1433 list_del(&rspiocbp->list);
1434 lpfc_sli_release_iocbq(phba,
1438 lpfc_sli_release_iocbq(phba, saveq);
1443 * If the port response put pointer has not been updated, sync
1444 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1445 * response put pointer.
1447 if (pring->rspidx == portRspPut) {
1448 portRspPut = le32_to_cpu(pgp->rspPutInx);
1450 } /* while (pring->rspidx != portRspPut) */
1452 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
1453 /* At least one response entry has been freed */
1454 pring->stats.iocb_rsp_full++;
1455 /* SET RxRE_RSP in Chip Att register */
1456 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1457 writel(status, phba->CAregaddr);
1458 readl(phba->CAregaddr); /* flush */
1460 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1461 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1462 pring->stats.iocb_cmd_empty++;
1464 /* Force update of the local copy of cmdGetInx */
1465 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1466 lpfc_sli_resume_iocb(phba, pring);
1468 if ((pring->lpfc_sli_cmd_available))
1469 (pring->lpfc_sli_cmd_available) (phba, pring);
1473 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1478 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1480 LIST_HEAD(completions);
1481 struct lpfc_iocbq *iocb, *next_iocb;
1487 /* Error everything on txq and txcmplq
1490 spin_lock_irq(phba->host->host_lock);
1491 list_splice_init(&pring->txq, &completions);
1494 /* Next issue ABTS for everything on the txcmplq */
1495 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1496 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1498 spin_unlock_irq(phba->host->host_lock);
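/*
 * Everything that was waiting on the txq is now completed locally with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, while commands already on
 * the txcmplq were issued an abort above and will finish through the
 * normal response path.
 */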
1500 while (!list_empty(&completions)) {
1501 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1503 list_del(&iocb->list);
1505 if (iocb->iocb_cmpl) {
1506 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1507 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1508 (iocb->iocb_cmpl) (phba, iocb, iocb);
1510 lpfc_sli_release_iocbq(phba, iocb);
1517 lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1523 /* Read the HBA Host Status Register */
1524 status = readl(phba->HSregaddr);
1527 * Check status register every 100ms for 5 retries, then every
1528 * 500ms for 5, then every 2.5 sec for 5, then reset board and
1529 * every 2.5 sec for 4.
1530 * Break out of the loop if errors occurred during init.
1532 while (((status & mask) != mask) &&
1533 !(status & HS_FFERM) &&
1544 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
1545 lpfc_sli_brdrestart(phba);
1547 /* Read the HBA Host Status Register */
1548 status = readl(phba->HSregaddr);
1551 /* Check to see if any errors occurred during init */
1552 if ((status & HS_FFERM) || (i >= 20)) {
1553 phba->hba_state = LPFC_HBA_ERROR;
1560 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1562 void lpfc_reset_barrier(struct lpfc_hba * phba)
1564 uint32_t __iomem *resp_buf;
1565 uint32_t __iomem *mbox_buf;
1566 volatile uint32_t mbox;
1571 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1572 if (hdrtype != 0x80 ||
1573 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1574 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1578 * Tell the other part of the chip to suspend temporarily all
1581 resp_buf = phba->MBslimaddr;
1583 /* Disable the error attention */
1584 hc_copy = readl(phba->HCregaddr);
1585 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1586 readl(phba->HCregaddr); /* flush */
1588 if (readl(phba->HAregaddr) & HA_ERATT) {
1589 /* Clear Chip error bit */
1590 writel(HA_ERATT, phba->HAregaddr);
1595 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1596 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1598 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1599 mbox_buf = phba->MBslimaddr;
1600 writel(mbox, mbox_buf);
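/*
 * The barrier hands the chip a KILL_BOARD mailbox (owner set to
 * OWN_CHIP) through SLIM with a known test pattern parked at
 * resp_buf + 1; the loop below then waits for the chip to echo the
 * complemented pattern (or raise ERATT), which is taken to mean the
 * other part of the chip has quiesced.
 */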
1603 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1606 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1607 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1614 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1615 for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
1620 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1623 if (readl(phba->HAregaddr) & HA_ERATT) {
1624 writel(HA_ERATT, phba->HAregaddr);
1629 writel(hc_copy, phba->HCregaddr);
1630 readl(phba->HCregaddr); /* flush */
1634 lpfc_sli_brdkill(struct lpfc_hba * phba)
1636 struct lpfc_sli *psli;
1646 lpfc_printf_log(phba,
1649 "%d:0329 Kill HBA Data: x%x x%x\n",
1654 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1658 /* Disable the error attention */
1659 spin_lock_irq(phba->host->host_lock);
1660 status = readl(phba->HCregaddr);
1661 status &= ~HC_ERINT_ENA;
1662 writel(status, phba->HCregaddr);
1663 readl(phba->HCregaddr); /* flush */
1664 spin_unlock_irq(phba->host->host_lock);
1666 lpfc_kill_board(phba, pmb);
1667 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1668 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1670 if (retval != MBX_SUCCESS) {
1671 if (retval != MBX_BUSY)
1672 mempool_free(pmb, phba->mbox_mem_pool);
1676 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1678 mempool_free(pmb, phba->mbox_mem_pool);
1680 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1681 * attention every 100ms for 3 seconds. If we don't get ERATT after
1682 * 3 seconds we still set HBA_ERROR state because the status of the
1683 * board is now undefined.
1685 ha_copy = readl(phba->HAregaddr);
1687 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1689 ha_copy = readl(phba->HAregaddr);
1692 del_timer_sync(&psli->mbox_tmo);
1693 if (ha_copy & HA_ERATT) {
1694 writel(HA_ERATT, phba->HAregaddr);
1697 spin_lock_irq(phba->host->host_lock);
1698 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1699 spin_unlock_irq(phba->host->host_lock);
1701 psli->mbox_active = NULL;
1702 lpfc_hba_down_post(phba);
1703 phba->hba_state = LPFC_HBA_ERROR;
1705 return (ha_copy & HA_ERATT ? 0 : 1);
1709 lpfc_sli_brdreset(struct lpfc_hba * phba)
1711 struct lpfc_sli *psli;
1712 struct lpfc_sli_ring *pring;
1719 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1720 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1721 phba->hba_state, psli->sli_flag);
1723 /* perform board reset */
1724 phba->fc_eventTag = 0;
1726 phba->fc_prevDID = 0;
1728 /* Turn off parity checking and serr during the physical reset */
1729 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1730 pci_write_config_word(phba->pcidev, PCI_COMMAND,
1732 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1734 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1735 /* Now toggle INITFF bit in the Host Control Register */
1736 writel(HC_INITFF, phba->HCregaddr);
1738 readl(phba->HCregaddr); /* flush */
1739 writel(0, phba->HCregaddr);
1740 readl(phba->HCregaddr); /* flush */
1742 /* Restore PCI cmd register */
1743 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1745 /* Initialize relevant SLI info */
1746 for (i = 0; i < psli->num_rings; i++) {
1747 pring = &psli->ring[i];
1750 pring->next_cmdidx = 0;
1751 pring->local_getidx = 0;
1753 pring->missbufcnt = 0;
1756 phba->hba_state = LPFC_WARM_START;
1761 lpfc_sli_brdrestart(struct lpfc_hba * phba)
1764 struct lpfc_sli *psli;
1766 volatile uint32_t word0;
1767 void __iomem *to_slim;
1769 spin_lock_irq(phba->host->host_lock);
1774 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1775 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
1776 phba->hba_state, psli->sli_flag);
1779 mb = (MAILBOX_t *) &word0;
1780 mb->mbxCommand = MBX_RESTART;
1783 lpfc_reset_barrier(phba);
1785 to_slim = phba->MBslimaddr;
1786 writel(*(uint32_t *) mb, to_slim);
1787 readl(to_slim); /* flush */
1789 /* Only skip post after fc_ffinit is completed */
1790 if (phba->hba_state) {
1792 word0 = 1; /* This is really setting up word1 */
1795 word0 = 0; /* This is really setting up word1 */
1797 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1798 writel(*(uint32_t *) mb, to_slim);
1799 readl(to_slim); /* flush */
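/*
 * SLIM word 0 now holds the MBX_RESTART command and word 1 holds the
 * skip-POST flag (non-zero once fc_ffinit has completed); the board
 * reset below presumably picks these up when the firmware restarts.
 */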
1801 lpfc_sli_brdreset(phba);
1803 phba->hba_state = LPFC_INIT_START;
1805 spin_unlock_irq(phba->host->host_lock);
1807 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1808 psli->stats_start = get_seconds();
1815 lpfc_hba_down_post(phba);
1821 lpfc_sli_chipset_init(struct lpfc_hba *phba)
1823 uint32_t status, i = 0;
1825 /* Read the HBA Host Status Register */
1826 status = readl(phba->HSregaddr);
1828 /* Check status register to see what current state is */
1830 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
1832 /* Check every 100ms for 5 retries, then every 500ms for 5, then
1833 * every 2.5 sec for 5, then reset board and every 2.5 sec for 4.
1837 /* Adapter failed to init, timeout, status reg <status> */
1839 lpfc_printf_log(phba,
1842 "%d:0436 Adapter failed to init, "
1843 "timeout, status reg x%x\n",
1846 phba->hba_state = LPFC_HBA_ERROR;
1850 /* Check to see if any errors occurred during init */
1851 if (status & HS_FFERM) {
1852 /* ERROR: During chipset initialization */
1853 /* Adapter failed to init, chipset, status reg <status> */
1855 lpfc_printf_log(phba,
1858 "%d:0437 Adapter failed to init, "
1859 "chipset, status reg x%x\n",
1862 phba->hba_state = LPFC_HBA_ERROR;
1868 } else if (i <= 10) {
1875 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
1876 lpfc_sli_brdrestart(phba);
1878 /* Read the HBA Host Status Register */
1879 status = readl(phba->HSregaddr);
1882 /* Check to see if any errors occurred during init */
1883 if (status & HS_FFERM) {
1884 /* ERROR: During chipset initialization */
1885 /* Adapter failed to init, chipset, status reg <status> */
1886 lpfc_printf_log(phba,
1889 "%d:0438 Adapter failed to init, chipset, "
1893 phba->hba_state = LPFC_HBA_ERROR;
1897 /* Clear all interrupt enable conditions */
1898 writel(0, phba->HCregaddr);
1899 readl(phba->HCregaddr); /* flush */
1901 /* setup host attn register */
1902 writel(0xffffffff, phba->HAregaddr);
1903 readl(phba->HAregaddr); /* flush */
1908 lpfc_sli_hba_setup(struct lpfc_hba * phba)
1911 uint32_t resetcount = 0, rc = 0, done = 0;
1913 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1915 phba->hba_state = LPFC_HBA_ERROR;
1919 while (resetcount < 2 && !done) {
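/*
 * Bring the adapter up, making at most two passes over the
 * restart / CONFIG_PORT sequence: mark the mailbox subsystem active,
 * restart and re-initialize the chipset, then run the pre-CONFIG_PORT
 * prep and the CONFIG_PORT mailbox command further down.
 */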
1920 spin_lock_irq(phba->host->host_lock);
1921 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1922 spin_unlock_irq(phba->host->host_lock);
1923 phba->hba_state = LPFC_STATE_UNKNOWN;
1924 lpfc_sli_brdrestart(phba);
1926 rc = lpfc_sli_chipset_init(phba);
1930 spin_lock_irq(phba->host->host_lock);
1931 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1932 spin_unlock_irq(phba->host->host_lock);
1935 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
1936 * means the call was successful. Any other nonzero value is a failure,
1937 * but if ERESTART is returned, the driver may reset the HBA and try
1940 rc = lpfc_config_port_prep(phba);
1941 if (rc == -ERESTART) {
1942 phba->hba_state = 0;
1948 phba->hba_state = LPFC_INIT_MBX_CMDS;
1949 lpfc_config_port(phba, pmb);
1950 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1951 if (rc == MBX_SUCCESS)
1954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1955 "%d:0442 Adapter failed to init, mbxCmd x%x "
1956 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1957 phba->brd_no, pmb->mb.mbxCommand,
1958 pmb->mb.mbxStatus, 0);
1959 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
1963 goto lpfc_sli_hba_setup_error;
1965 rc = lpfc_sli_ring_map(phba, pmb);
1968 goto lpfc_sli_hba_setup_error;
1970 phba->sli.sli_flag |= LPFC_PROCESS_LA;
1972 rc = lpfc_config_port_post(phba);
1974 goto lpfc_sli_hba_setup_error;
1976 goto lpfc_sli_hba_setup_exit;
1977 lpfc_sli_hba_setup_error:
1978 phba->hba_state = LPFC_HBA_ERROR;
1979 lpfc_sli_hba_setup_exit:
1980 mempool_free(pmb, phba->mbox_mem_pool);
1984 /*! lpfc_mbox_timeout
1988 * \param ptr Timer context, holding a pointer to the driver's struct lpfc_hba.
1995 * This routine handles mailbox timeout events at timer interrupt context.
1998 lpfc_mbox_timeout(unsigned long ptr)
2000 struct lpfc_hba *phba;
2001 unsigned long iflag;
2003 phba = (struct lpfc_hba *)ptr;
2004 spin_lock_irqsave(phba->host->host_lock, iflag);
2005 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2006 phba->work_hba_events |= WORKER_MBOX_TMO;
2007 if (phba->work_wait)
2008 wake_up(phba->work_wait);
2010 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2014 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2016 LPFC_MBOXQ_t *pmbox;
2018 struct lpfc_sli *psli = &phba->sli;
2019 struct lpfc_sli_ring *pring;
2021 spin_lock_irq(phba->host->host_lock);
2022 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2023 spin_unlock_irq(phba->host->host_lock);
2027 pmbox = phba->sli.mbox_active;
2030 /* Mbox cmd <mbxCommand> timeout */
2031 lpfc_printf_log(phba,
2034 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2039 phba->sli.mbox_active);
2041 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2042 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2043 * it to fail all outstanding SCSI IO.
2045 phba->hba_state = LPFC_STATE_UNKNOWN;
2046 phba->work_hba_events &= ~WORKER_MBOX_TMO;
2047 phba->fc_flag |= FC_ESTABLISH_LINK;
2048 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2049 spin_unlock_irq(phba->host->host_lock);
2051 pring = &psli->ring[psli->fcp_ring];
2052 lpfc_sli_abort_iocb_ring(phba, pring);
2054 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2055 "%d:0316 Resetting board due to mailbox timeout\n",
2058 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2059 * any outstanding mailbox commands.
2061 lpfc_offline_prep(phba);
2063 lpfc_sli_brdrestart(phba);
2064 if (lpfc_online(phba) == 0) /* Initialize the HBA */
2065 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2066 lpfc_unblock_mgmt_io(phba);
2071 lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2074 struct lpfc_sli *psli;
2075 uint32_t status, evtctr;
2078 unsigned long drvr_flag = 0;
2079 volatile uint32_t word0, ldata;
2080 void __iomem *to_slim;
2082 /* If the PCI channel is in offline state, do not post mbox. */
2083 if (unlikely(pci_channel_offline(phba->pcidev)))
2084 return MBX_NOT_FINISHED;
2088 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2092 status = MBX_SUCCESS;
2094 if (phba->hba_state == LPFC_HBA_ERROR) {
2095 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2097 /* Mbox command <mbxCommand> cannot issue */
2098 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2099 return (MBX_NOT_FINISHED);
2102 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2103 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2104 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2105 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2106 return (MBX_NOT_FINISHED);
2109 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2110 /* Polling for a mbox command when another one is already active
2111 * is not allowed in SLI. Also, the driver must have established
2112 * SLI2 mode to queue and process multiple mbox commands.
2115 if (flag & MBX_POLL) {
2116 spin_unlock_irqrestore(phba->host->host_lock,
2119 /* Mbox command <mbxCommand> cannot issue */
2120 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2121 return (MBX_NOT_FINISHED);
2124 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2125 spin_unlock_irqrestore(phba->host->host_lock,
2127 /* Mbox command <mbxCommand> cannot issue */
2128 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2129 return (MBX_NOT_FINISHED);
2132 /* Handle STOP IOCB processing flag. This is only meaningful
2133 * if we are not polling for mbox completion.
2135 if (flag & MBX_STOP_IOCB) {
2136 flag &= ~MBX_STOP_IOCB;
2137 /* Now flag each ring */
2138 for (i = 0; i < psli->num_rings; i++) {
2139 /* If the ring is active, flag it */
2140 if (psli->ring[i].cmdringaddr) {
2141 psli->ring[i].flag |=
2147 /* Another mailbox command is still being processed, queue this
2148 * command to be processed later.
2150 lpfc_mbox_put(phba, pmbox);
2152 /* Mbox cmd issue - BUSY */
2153 lpfc_printf_log(phba,
2156 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
2163 psli->slistat.mbox_busy++;
2164 spin_unlock_irqrestore(phba->host->host_lock,
2170 /* Handle STOP IOCB processing flag. This is only meaningful
2171 * if we are not polling for mbox completion.
2173 if (flag & MBX_STOP_IOCB) {
2174 flag &= ~MBX_STOP_IOCB;
2175 if (flag == MBX_NOWAIT) {
2176 /* Now flag each ring */
2177 for (i = 0; i < psli->num_rings; i++) {
2178 /* If the ring is active, flag it */
2179 if (psli->ring[i].cmdringaddr) {
2180 psli->ring[i].flag |=
2187 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2189 /* If we are not polling, we MUST be in SLI2 mode */
2190 if (flag != MBX_POLL) {
2191 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2192 (mb->mbxCommand != MBX_KILL_BOARD)) {
2193 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2194 spin_unlock_irqrestore(phba->host->host_lock,
2196 /* Mbox command <mbxCommand> cannot issue */
2197 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
2198 return (MBX_NOT_FINISHED);
2200 /* timeout active mbox command */
2201 mod_timer(&psli->mbox_tmo, (jiffies +
2202 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2205 /* Mailbox cmd <cmd> issue */
2206 lpfc_printf_log(phba,
2209 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
2216 psli->slistat.mbox_cmd++;
2217 evtctr = psli->slistat.mbox_event;
2219 /* next set own bit for the adapter and copy over command word */
2220 mb->mbxOwner = OWN_CHIP;
2222 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2223 /* First copy command data to host SLIM area */
2224 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2226 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2227 /* copy command data into host mbox for cmpl */
2228 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2232 /* First copy mbox command data to HBA SLIM, skip past first
2234 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2235 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2236 MAILBOX_CMD_SIZE - sizeof (uint32_t));
2238 /* Next copy over first word, with mbxOwner set */
2239 ldata = *((volatile uint32_t *)mb);
2240 to_slim = phba->MBslimaddr;
2241 writel(ldata, to_slim);
2242 readl(to_slim); /* flush */
2244 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2245 /* switch over to host mailbox */
2246 psli->sli_flag |= LPFC_SLI2_ACTIVE;
2251 /* interrupt the board to do it right away */
2252 writel(CA_MBATT, phba->CAregaddr);
2253 readl(phba->CAregaddr); /* flush */
2257 /* Don't wait for it to finish, just return */
2258 psli->mbox_active = pmbox;
2262 psli->mbox_active = NULL;
2263 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2264 /* First read mbox status word */
2265 word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2266 word0 = le32_to_cpu(word0);
2268 /* First read mbox status word */
2269 word0 = readl(phba->MBslimaddr);
2272 /* Read the HBA Host Attention Register */
2273 ha_copy = readl(phba->HAregaddr);
2275 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2276 i *= 1000; /* Convert to ms */
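/*
 * Poll for completion: keep re-reading the mailbox status word and the
 * Host Attention register until the chip releases ownership (OWN_CHIP
 * clears) and signals HA_MBATT, bounded by the per-command timeout
 * computed above (in ms).
 */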
2278 /* Wait for command to complete */
2279 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2280 (!(ha_copy & HA_MBATT) &&
2281 (phba->hba_state > LPFC_WARM_START))) {
2283 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2284 spin_unlock_irqrestore(phba->host->host_lock,
2286 return (MBX_NOT_FINISHED);
2289 /* Check if we took a mbox interrupt while we were
2291 if (((word0 & OWN_CHIP) != OWN_CHIP)
2292 && (evtctr != psli->slistat.mbox_event))
2295 spin_unlock_irqrestore(phba->host->host_lock,
2300 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2302 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2303 /* First copy command data */
2304 word0 = *((volatile uint32_t *)
2305 &phba->slim2p->mbx);
2306 word0 = le32_to_cpu(word0);
2307 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2309 volatile uint32_t slimword0;
2310 /* Check real SLIM for any errors */
2311 slimword0 = readl(phba->MBslimaddr);
2312 slimmb = (MAILBOX_t *) & slimword0;
2313 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2314 && slimmb->mbxStatus) {
2321 /* First copy command data */
2322 word0 = readl(phba->MBslimaddr);
2324 /* Read the HBA Host Attention Register */
2325 ha_copy = readl(phba->HAregaddr);
2328 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2329 /* copy results back to user */
2330 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2333 /* First copy command data */
2334 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2336 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2338 lpfc_memcpy_from_slim((void *)pmbox->context2,
2339 phba->MBslimaddr + DMP_RSP_OFFSET,
2340 mb->un.varDmp.word_cnt);
2344 writel(HA_MBATT, phba->HAregaddr);
2345 readl(phba->HAregaddr); /* flush */
2347 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2348 status = mb->mbxStatus;
2351 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2356 lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2357 struct lpfc_iocbq * piocb)
2359 /* Insert the caller's iocb in the txq tail for later processing. */
2360 list_add_tail(&piocb->list, &pring->txq);
2365 static struct lpfc_iocbq *
2366 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2367 struct lpfc_iocbq ** piocb)
2369 struct lpfc_iocbq * nextiocb;
2371 nextiocb = lpfc_sli_ringtx_get(phba, pring);
2381 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2382 struct lpfc_iocbq *piocb, uint32_t flag)
2384 struct lpfc_iocbq *nextiocb;
2387 /* If the PCI channel is in offline state, do not post iocbs. */
2388 if (unlikely(pci_channel_offline(phba->pcidev)))
2392 * We should never get an IOCB if we are in a < LINK_DOWN state
2394 if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
2398 * Check to see if we are blocking IOCB processing because of an
2399 * outstanding mbox command.
2401 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2404 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
2406 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2407 * can be issued if the link is not up.
2409 switch (piocb->iocb.ulpCommand) {
2410 case CMD_QUE_RING_BUF_CN:
2411 case CMD_QUE_RING_BUF64_CN:
2413 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2414 * completion, iocb_cmpl MUST be 0.
2416 if (piocb->iocb_cmpl)
2417 piocb->iocb_cmpl = NULL;
2419 case CMD_CREATE_XRI_CR:
2420 case CMD_CLOSE_XRI_CN:
2421 case CMD_CLOSE_XRI_CX:
2428 * For FCP commands, we must be in a state where we can process link
2431 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2432 !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
2435 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2436 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2437 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2440 lpfc_sli_update_ring(phba, pring);
2442 lpfc_sli_update_full_ring(phba, pring);
2445 return IOCB_SUCCESS;
2450 pring->stats.iocb_cmd_delay++;
2454 if (!(flag & SLI_IOCB_RET_IOCB)) {
2455 lpfc_sli_ringtx_put(phba, pring, piocb);
2456 return IOCB_SUCCESS;
2463 lpfc_extra_ring_setup( struct lpfc_hba *phba)
2465 struct lpfc_sli *psli;
2466 struct lpfc_sli_ring *pring;
2470 /* Adjust cmd/rsp ring iocb entries more evenly */
2472 /* Take some away from the FCP ring */
2473 pring = &psli->ring[psli->fcp_ring];
2474 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2475 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2476 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2477 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2479 /* and give them to the extra ring */
2480 pring = &psli->ring[psli->extra_ring];
2482 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2483 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2484 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2485 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2487 /* Setup default profile for this ring */
2488 pring->iotag_max = 4096;
2489 pring->num_mask = 1;
2490 pring->prt[0].profile = 0; /* Mask 0 */
2491 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2492 pring->prt[0].type = phba->cfg_multi_ring_type;
2493 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2498 lpfc_sli_setup(struct lpfc_hba *phba)
2501 struct lpfc_sli *psli = &phba->sli;
2502 struct lpfc_sli_ring *pring;
2504 psli->num_rings = MAX_CONFIGURED_RINGS;
2506 psli->fcp_ring = LPFC_FCP_RING;
2507 psli->next_ring = LPFC_FCP_NEXT_RING;
2508 psli->extra_ring = LPFC_EXTRA_RING;
2510 psli->iocbq_lookup = NULL;
2511 psli->iocbq_lookup_len = 0;
2512 psli->last_iotag = 0;
2514 for (i = 0; i < psli->num_rings; i++) {
2515 pring = &psli->ring[i];
2517 case LPFC_FCP_RING: /* ring 0 - FCP */
2518 /* numCiocb and numRiocb are used in config_port */
2519 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2520 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2521 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2522 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2523 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2524 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2525 pring->iotag_ctr = 0;
2526 pring->iotag_max = (phba->cfg_hba_queue_depth * 2);
2528 pring->fast_iotag = pring->iotag_max;
2529 pring->num_mask = 0;
2531 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
2532 /* numCiocb and numRiocb are used in config_port */
2533 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2534 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2535 pring->num_mask = 0;
2537 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2538 /* numCiocb and numRiocb are used in config_port */
2539 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2540 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2541 pring->fast_iotag = 0;
2542 pring->iotag_ctr = 0;
2543 pring->iotag_max = 4096;
2544 pring->num_mask = 4;
2545 pring->prt[0].profile = 0; /* Mask 0 */
2546 pring->prt[0].rctl = FC_ELS_REQ;
2547 pring->prt[0].type = FC_ELS_DATA;
2548 pring->prt[0].lpfc_sli_rcv_unsol_event =
2549 lpfc_els_unsol_event;
2550 pring->prt[1].profile = 0; /* Mask 1 */
2551 pring->prt[1].rctl = FC_ELS_RSP;
2552 pring->prt[1].type = FC_ELS_DATA;
2553 pring->prt[1].lpfc_sli_rcv_unsol_event =
2554 lpfc_els_unsol_event;
2555 pring->prt[2].profile = 0; /* Mask 2 */
2556 /* NameServer Inquiry */
2557 pring->prt[2].rctl = FC_UNSOL_CTL;
2559 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
2560 pring->prt[2].lpfc_sli_rcv_unsol_event =
2561 lpfc_ct_unsol_event;
2562 pring->prt[3].profile = 0; /* Mask 3 */
2563 /* NameServer response */
2564 pring->prt[3].rctl = FC_SOL_CTL;
2566 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
2567 pring->prt[3].lpfc_sli_rcv_unsol_event =
2568 lpfc_ct_unsol_event;
2571 totiocb += (pring->numCiocb + pring->numRiocb);
2573 if (totiocb > MAX_SLI2_IOCB) {
2574 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2576 "%d:0462 Too many cmd / rsp ring entries in "
2577 "SLI2 SLIM Data: x%x x%x\n",
2578 phba->brd_no, totiocb, MAX_SLI2_IOCB);
2580 if (phba->cfg_multi_ring_support == 2)
2581 lpfc_extra_ring_setup(phba);
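
/*
 * Illustrative sketch, not part of the original driver: the prt[] masks
 * configured above are what the unsolicited-event path compares a received
 * frame's R_CTL and TYPE against when picking a handler such as
 * lpfc_els_unsol_event or lpfc_ct_unsol_event.  The helper below is
 * hypothetical and only shows the matching idea.
 */
#if 0
static int example_find_unsol_mask(struct lpfc_sli_ring *pring,
				   uint32_t rctl, uint32_t type)
{
	int i;

	/* Compare the frame's R_CTL/TYPE against each configured profile. */
	for (i = 0; i < pring->num_mask; i++)
		if (pring->prt[i].rctl == rctl && pring->prt[i].type == type)
			return i;	/* index of the matching prt[] entry */
	return -1;			/* no handler registered */
}
#endif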
2587 lpfc_sli_queue_setup(struct lpfc_hba * phba)
2589 struct lpfc_sli *psli;
2590 struct lpfc_sli_ring *pring;
2594 spin_lock_irq(phba->host->host_lock);
2595 INIT_LIST_HEAD(&psli->mboxq);
2596 /* Initialize list headers for txq and txcmplq as doubly linked lists */
2597 for (i = 0; i < psli->num_rings; i++) {
2598 pring = &psli->ring[i];
2600 pring->next_cmdidx = 0;
2601 pring->local_getidx = 0;
2603 INIT_LIST_HEAD(&pring->txq);
2604 INIT_LIST_HEAD(&pring->txcmplq);
2605 INIT_LIST_HEAD(&pring->iocb_continueq);
2606 INIT_LIST_HEAD(&pring->postbufq);
2608 spin_unlock_irq(phba->host->host_lock);
2613 lpfc_sli_hba_down(struct lpfc_hba * phba)
2615 LIST_HEAD(completions);
2616 struct lpfc_sli *psli;
2617 struct lpfc_sli_ring *pring;
2619 struct lpfc_iocbq *iocb;
2622 unsigned long flags = 0;
2625 lpfc_hba_down_prep(phba);
2627 spin_lock_irqsave(phba->host->host_lock, flags);
2628 for (i = 0; i < psli->num_rings; i++) {
2629 pring = &psli->ring[i];
2630 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2633 * Error everything on the txq since these iocbs have not been
2634 * given to the FW yet.
2636 list_splice_init(&pring->txq, &completions);
2640 spin_unlock_irqrestore(phba->host->host_lock, flags);
2642 while (!list_empty(&completions)) {
2643 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2644 cmd = &iocb->iocb;
2645 list_del(&iocb->list);
2647 if (iocb->iocb_cmpl) {
2648 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2649 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2650 (iocb->iocb_cmpl) (phba, iocb, iocb);
2652 lpfc_sli_release_iocbq(phba, iocb);
2655 /* Return any active mbox cmds */
2656 del_timer_sync(&psli->mbox_tmo);
2657 spin_lock_irqsave(phba->host->host_lock, flags);
2658 phba->work_hba_events &= ~WORKER_MBOX_TMO;
2659 if (psli->mbox_active) {
2660 pmb = psli->mbox_active;
2661 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2662 if (pmb->mbox_cmpl) {
2663 spin_unlock_irqrestore(phba->host->host_lock, flags);
2664 pmb->mbox_cmpl(phba,pmb);
2665 spin_lock_irqsave(phba->host->host_lock, flags);
2668 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2669 psli->mbox_active = NULL;
2671 /* Return any pending mbox cmds */
2672 while ((pmb = lpfc_mbox_get(phba)) != NULL) {
2673 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2674 if (pmb->mbox_cmpl) {
2675 spin_unlock_irqrestore(phba->host->host_lock, flags);
2676 pmb->mbox_cmpl(phba,pmb);
2677 spin_lock_irqsave(phba->host->host_lock, flags);
2681 INIT_LIST_HEAD(&psli->mboxq);
2683 spin_unlock_irqrestore(phba->host->host_lock, flags);
2689 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2691 uint32_t *src = srcp;
2692 uint32_t *dest = destp;
2696 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2697 ldata = *src;
2698 ldata = le32_to_cpu(ldata);
2699 *dest = ldata;
2700 src++;
2701 dest++;
2702 }
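
/*
 * Illustrative sketch, not part of the original driver:
 * lpfc_sli_pcimem_bcopy() above copies cnt bytes a 32-bit word at a time,
 * byte-swapping each word from little-endian wire/DMA order into host
 * order.  The function and buffer names below are hypothetical.
 */
#if 0
static void example_copy_from_wire(uint32_t *wire_words, uint32_t *host_words)
{
	/* Copy four words, fixing byte order on big-endian hosts. */
	lpfc_sli_pcimem_bcopy(wire_words, host_words, 4 * sizeof(uint32_t));
}
#endif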
2706 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2707 struct lpfc_dmabuf * mp)
2709 /* Stick struct lpfc_dmabuf at end of postbufq so the driver can look it
2710 up later. */
2711 list_add_tail(&mp->list, &pring->postbufq);
2713 pring->postbufq_cnt++;
2718 struct lpfc_dmabuf *
2719 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2720 dma_addr_t phys)
2722 struct lpfc_dmabuf *mp, *next_mp;
2723 struct list_head *slp = &pring->postbufq;
2725 /* Search postbufq, from the beginning, looking for a match on phys */
2726 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2727 if (mp->phys == phys) {
2728 list_del_init(&mp->list);
2729 pring->postbufq_cnt--;
2734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2735 "%d:0410 Cannot find virtual addr for mapped buf on "
2736 "ring %d Data x%llx x%p x%p x%x\n",
2737 phba->brd_no, pring->ringno, (unsigned long long)phys,
2738 slp->next, slp->prev, pring->postbufq_cnt);
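
/*
 * Illustrative sketch, not part of the original driver: the postbufq pair
 * above lets the driver recover the virtual mapping of a posted DMA buffer
 * when the adapter later reports only its physical address in an
 * unsolicited IOCB.  The helper name and the way icmd is obtained are
 * assumptions made for illustration.
 */
#if 0
static void example_lookup_posted_buf(struct lpfc_hba *phba,
				      struct lpfc_sli_ring *pring,
				      IOCB_t *icmd)
{
	struct lpfc_dmabuf *mp;
	dma_addr_t phys;

	/* The unsolicited IOCB only carries the buffer's physical address. */
	phys = getPaddr(icmd->un.cont64[0].addrHigh,
			icmd->un.cont64[0].addrLow);
	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
	if (mp == NULL)
		return;	/* the routine above has already logged the error */

	/* ... hand mp->virt off to the protocol handler, then free it ... */
}
#endif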
2743 lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2744 struct lpfc_iocbq * rspiocb)
2747 uint16_t abort_iotag, abort_context;
2748 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
2749 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2752 irsp = &rspiocb->iocb;
2754 spin_lock_irq(phba->host->host_lock);
2756 if (irsp->ulpStatus) {
2757 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
2758 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
2760 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
2761 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
2763 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2764 "%d:0327 Cannot abort els iocb %p"
2765 " with tag %x context %x\n",
2766 phba->brd_no, abort_iocb,
2767 abort_iotag, abort_context);
2770 * make sure we have the right iocbq before taking it
2771 * off the txcmplq and trying to call its completion routine.
2773 if (abort_iocb &&
2774 abort_iocb->iocb.ulpContext == abort_context &&
2775 abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
2776 list_del(&abort_iocb->list);
2777 pring->txcmplq_cnt--;
2779 rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
2780 if (rsp_ab_iocb == NULL)
2781 lpfc_sli_release_iocbq(phba, abort_iocb);
2783 abort_iocb->iocb_flag &=
2784 ~LPFC_DRIVER_ABORTED;
2785 rsp_ab_iocb->iocb.ulpStatus =
2786 IOSTAT_LOCAL_REJECT;
2787 rsp_ab_iocb->iocb.un.ulpWord[4] =
2788 IOERR_SLI_ABORTED;
2789 spin_unlock_irq(phba->host->host_lock);
2790 (abort_iocb->iocb_cmpl)
2791 (phba, abort_iocb, rsp_ab_iocb);
2792 spin_lock_irq(phba->host->host_lock);
2793 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
2798 lpfc_sli_release_iocbq(phba, cmdiocb);
2799 spin_unlock_irq(phba->host->host_lock);
2804 lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
2805 struct lpfc_sli_ring * pring,
2806 struct lpfc_iocbq * cmdiocb)
2808 struct lpfc_iocbq *abtsiocbp;
2809 IOCB_t *icmd = NULL;
2810 IOCB_t *iabt = NULL;
2811 int retval = IOCB_ERROR;
2813 /* There are certain command types we don't want
2814 * to abort. */
2816 icmd = &cmdiocb->iocb;
2817 if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) ||
2818 (icmd->ulpCommand == CMD_CLOSE_XRI_CN))
2821 /* If we're unloading, interrupts are disabled, so we
2822 * need to clean up the iocb here.
2824 if (phba->fc_flag & FC_UNLOADING)
2825 goto abort_iotag_exit;
2827 /* issue ABTS for this IOCB based on iotag */
2828 abtsiocbp = lpfc_sli_get_iocbq(phba);
2829 if (abtsiocbp == NULL)
2832 /* Setting LPFC_DRIVER_ABORTED signals the abort completion routine
2833 * to set the correct status on the original iocb before calling its
2834 * completion handler. */
2835 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
2837 iabt = &abtsiocbp->iocb;
2838 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
2839 iabt->un.acxri.abortContextTag = icmd->ulpContext;
2840 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
2842 iabt->ulpClass = icmd->ulpClass;
2844 if (phba->hba_state >= LPFC_LINK_UP)
2845 iabt->ulpCommand = CMD_ABORT_XRI_CN;
2847 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
2849 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
2851 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2852 "%d:0339 Abort xri x%x, original iotag x%x, abort "
2854 phba->brd_no, iabt->un.acxri.abortContextTag,
2855 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
2856 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
2860 /* If we could not issue an abort, dequeue the iocb and handle
2861 * the completion here.
2863 if (retval == IOCB_ERROR) {
2864 list_del(&cmdiocb->list);
2865 pring->txcmplq_cnt--;
2867 if (cmdiocb->iocb_cmpl) {
2868 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2869 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2870 spin_unlock_irq(phba->host->host_lock);
2871 (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
2872 spin_lock_irq(phba->host->host_lock);
2874 lpfc_sli_release_iocbq(phba, cmdiocb);
2881 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2882 uint64_t lun_id, uint32_t ctx,
2883 lpfc_ctx_cmd ctx_cmd)
2885 struct lpfc_scsi_buf *lpfc_cmd;
2886 struct scsi_cmnd *cmnd;
2889 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
2892 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
2893 cmnd = lpfc_cmd->pCmd;
2900 if ((cmnd->device->id == tgt_id) &&
2901 (cmnd->device->lun == lun_id))
2905 if (cmnd->device->id == tgt_id)
2909 if (iocbq->iocb.ulpContext == ctx)
2916 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
2917 __FUNCTION__, ctx_cmd);
2925 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2926 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2928 struct lpfc_iocbq *iocbq;
2931 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
2932 iocbq = phba->sli.iocbq_lookup[i];
2934 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2943 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2944 struct lpfc_iocbq * rspiocb)
2946 unsigned long iflags;
2948 spin_lock_irqsave(phba->host->host_lock, iflags);
2949 lpfc_sli_release_iocbq(phba, cmdiocb);
2950 spin_unlock_irqrestore(phba->host->host_lock, iflags);
2955 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2956 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2957 lpfc_ctx_cmd abort_cmd)
2959 struct lpfc_iocbq *iocbq;
2960 struct lpfc_iocbq *abtsiocb;
2962 int errcnt = 0, ret_val = 0;
2965 for (i = 1; i <= phba->sli.last_iotag; i++) {
2966 iocbq = phba->sli.iocbq_lookup[i];
2968 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2972 /* issue ABTS for this IOCB based on iotag */
2973 abtsiocb = lpfc_sli_get_iocbq(phba);
2974 if (abtsiocb == NULL) {
2975 errcnt++;
2976 continue;
2977 }
2979 cmd = &iocbq->iocb;
2980 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2981 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2982 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2983 abtsiocb->iocb.ulpLe = 1;
2984 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2986 if (phba->hba_state >= LPFC_LINK_UP)
2987 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2989 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
2991 /* Setup callback routine and issue the command. */
2992 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2993 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
2994 if (ret_val == IOCB_ERROR) {
2995 lpfc_sli_release_iocbq(phba, abtsiocb);
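
/*
 * Illustrative sketch, not part of the original driver: how a reset path
 * might use lpfc_sli_sum_iocb() and lpfc_sli_abort_iocb() together, first
 * counting the outstanding FCP iocbs for a target/LUN and then asking the
 * adapter to abort them.  The function name, tgt_id and lun_id values are
 * placeholders.
 */
#if 0
static void example_abort_lun(struct lpfc_hba *phba,
			      uint16_t tgt_id, uint64_t lun_id)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	int pending;

	pending = lpfc_sli_sum_iocb(phba, pring, tgt_id, lun_id, LPFC_CTX_LUN);
	if (pending)
		(void) lpfc_sli_abort_iocb(phba, pring, tgt_id, lun_id,
					   0, LPFC_CTX_LUN);
}
#endif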
3005 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3006 struct lpfc_iocbq *cmdiocbq,
3007 struct lpfc_iocbq *rspiocbq)
3009 wait_queue_head_t *pdone_q;
3010 unsigned long iflags;
3012 spin_lock_irqsave(phba->host->host_lock, iflags);
3013 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3014 if (cmdiocbq->context2 && rspiocbq)
3015 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3016 &rspiocbq->iocb, sizeof(IOCB_t));
3018 pdone_q = cmdiocbq->context_un.wait_queue;
3019 spin_unlock_irqrestore(phba->host->host_lock, iflags);
3026 * Issue the caller's iocb and wait for its completion, but no longer than the
3027 * caller's timeout. Note that the LPFC_IO_WAKE bit in iocb_flag is
3028 * cleared before lpfc_sli_issue_iocb is called, since the wake routine
3029 * sets it and, by definition, this is a wait function.
3032 lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3033 struct lpfc_sli_ring * pring,
3034 struct lpfc_iocbq * piocb,
3035 struct lpfc_iocbq * prspiocbq,
3038 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3039 long timeleft, timeout_req = 0;
3040 int retval = IOCB_SUCCESS;
3044 * If the caller has provided a response iocbq buffer, then context2
3045 * must be NULL or it is an error.
3047 if (prspiocbq) {
3048 if (piocb->context2)
3049 return IOCB_ERROR;
3050 piocb->context2 = prspiocbq;
3051 }
3053 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3054 piocb->context_un.wait_queue = &done_q;
3055 piocb->iocb_flag &= ~LPFC_IO_WAKE;
3057 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3058 creg_val = readl(phba->HCregaddr);
3059 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3060 writel(creg_val, phba->HCregaddr);
3061 readl(phba->HCregaddr); /* flush */
3064 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3065 if (retval == IOCB_SUCCESS) {
3066 timeout_req = timeout * HZ;
3067 spin_unlock_irq(phba->host->host_lock);
3068 timeleft = wait_event_timeout(done_q,
3069 piocb->iocb_flag & LPFC_IO_WAKE,
3071 spin_lock_irq(phba->host->host_lock);
3073 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3074 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3075 "%d:0331 IOCB wake signaled\n",
3077 } else if (timeleft == 0) {
3078 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3079 "%d:0338 IOCB wait timeout error - no "
3080 "wake response Data x%x\n",
3081 phba->brd_no, timeout);
3082 retval = IOCB_TIMEDOUT;
3084 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3085 "%d:0330 IOCB wake NOT set, "
3086 "Data x%x x%lx\n", phba->brd_no,
3087 timeout, (timeleft / jiffies));
3088 retval = IOCB_TIMEDOUT;
3091 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3092 "%d:0332 IOCB wait issue failed, Data x%x\n",
3093 phba->brd_no, retval);
3094 retval = IOCB_ERROR;
3097 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3098 creg_val = readl(phba->HCregaddr);
3099 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3100 writel(creg_val, phba->HCregaddr);
3101 readl(phba->HCregaddr); /* flush */
3105 piocb->context2 = NULL;
3107 piocb->context_un.wait_queue = NULL;
3108 piocb->iocb_cmpl = NULL;
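
/*
 * Illustrative sketch, not part of the original driver: a synchronous
 * caller of lpfc_sli_issue_iocb_wait().  The ring choice, timeout value and
 * the way the command iocb is built are placeholders; as elsewhere in this
 * file, the host_lock is assumed to be held on entry.
 */
#if 0
static void example_sync_els_cmd(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	int rc;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq || !rspiocbq)
		goto out;

	/* ... fill in cmdiocbq->iocb for the request being sent ... */

	rc = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[LPFC_ELS_RING],
				      cmdiocbq, rspiocbq, 30 /* seconds */);
	if (rc == IOCB_SUCCESS) {
		/* rspiocbq->iocb now holds the completion of the command. */
	}
	/* IOCB_TIMEDOUT and IOCB_ERROR need more careful handling in a real
	 * caller; this sketch simply releases both iocbqs. */
out:
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
}
#endif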
3113 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3116 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3119 /* The caller must leave context1 empty. */
3120 if (pmboxq->context1 != 0) {
3121 return (MBX_NOT_FINISHED);
3124 /* setup wake call as the mailbox completion callback */
3125 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3126 /* setup context field to pass wait_queue pointer to wake function */
3127 pmboxq->context1 = &done_q;
3129 /* now issue the command */
3130 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3132 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3133 wait_event_interruptible_timeout(done_q,
3134 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3137 pmboxq->context1 = NULL;
3139 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
3140 * otherwise do not free the resources.
3142 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3143 retval = MBX_SUCCESS;
3145 retval = MBX_TIMEOUT;
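
/*
 * Illustrative sketch, not part of the original driver: issuing a mailbox
 * command synchronously with lpfc_sli_issue_mbox_wait().  Which mailbox
 * command gets prepared is a placeholder; the MBX_TIMEOUT check mirrors the
 * rule above that a timed-out mailbox's memory is not freed because the
 * command may still complete later.
 */
#if 0
static void example_sync_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return;

	/* ... prepare pmb->mb for the desired mailbox command ... */

	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);
}
#endif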
3152 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3156 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
3157 if (i++ > LPFC_MBOX_TMO * 1000)
3160 if (lpfc_sli_handle_mb_event(phba) == 0)
3166 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3170 lpfc_intr_handler(int irq, void *dev_id)
3172 struct lpfc_hba *phba;
3174 uint32_t work_ha_copy;
3175 unsigned long status;
3180 * Get the driver's phba structure from the dev_id and
3181 * assume the HBA is not interrupting.
3183 phba = (struct lpfc_hba *) dev_id;
3185 if (unlikely(!phba))
3188 /* If the pci channel is offline, ignore all the interrupts. */
3189 if (unlikely(pci_channel_offline(phba->pcidev)))
3192 phba->sli.slistat.sli_intr++;
3195 * Call the HBA to see if it is interrupting. If not, don't claim
3196 * the interrupt.
3199 /* Ignore all interrupts during initialization. */
3200 if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
3204 * Read host attention register to determine interrupt source
3205 * Clear Attention Sources, except Error Attention (to
3206 * preserve status) and Link Attention
3208 spin_lock(phba->host->host_lock);
3209 ha_copy = readl(phba->HAregaddr);
3210 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3211 readl(phba->HAregaddr); /* flush */
3212 spin_unlock(phba->host->host_lock);
3214 if (unlikely(!ha_copy))
3217 work_ha_copy = ha_copy & phba->work_ha_mask;
3219 if (unlikely(work_ha_copy)) {
3220 if (work_ha_copy & HA_LATT) {
3221 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3223 * Turn off Link Attention interrupts
3224 * until CLEAR_LA done
3226 spin_lock(phba->host->host_lock);
3227 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3228 control = readl(phba->HCregaddr);
3229 control &= ~HC_LAINT_ENA;
3230 writel(control, phba->HCregaddr);
3231 readl(phba->HCregaddr); /* flush */
3232 spin_unlock(phba->host->host_lock);
3235 work_ha_copy &= ~HA_LATT;
3238 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3239 for (i = 0; i < phba->sli.num_rings; i++) {
3240 if (work_ha_copy & (HA_RXATT << (4*i))) {
3242 * Turn off slow-ring interrupts
3244 spin_lock(phba->host->host_lock);
3245 control = readl(phba->HCregaddr);
3246 control &= ~(HC_R0INT_ENA << i);
3247 writel(control, phba->HCregaddr);
3248 readl(phba->HCregaddr); /* flush */
3249 spin_unlock(phba->host->host_lock);
3254 if (work_ha_copy & HA_ERATT) {
3255 phba->hba_state = LPFC_HBA_ERROR;
3257 * There was a link/board error. Read the
3258 * status register to retrieve the error event
3259 * and process it.
3261 phba->sli.slistat.err_attn_event++;
3262 /* Save status info */
3263 phba->work_hs = readl(phba->HSregaddr);
3264 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3265 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3267 /* Clear Chip error bit */
3268 writel(HA_ERATT, phba->HAregaddr);
3269 readl(phba->HAregaddr); /* flush */
3273 spin_lock(phba->host->host_lock);
3274 phba->work_ha |= work_ha_copy;
3275 if (phba->work_wait)
3276 wake_up(phba->work_wait);
3277 spin_unlock(phba->host->host_lock);
3280 ha_copy &= ~(phba->work_ha_mask);
3283 * Process all events on FCP ring. Take the optimized path for
3284 * FCP IO. Any other IO is slow path and is handled by
3285 * the worker thread.
3287 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
3288 status >>= (4*LPFC_FCP_RING);
3289 if (status & HA_RXATT)
3290 lpfc_sli_handle_fast_ring_event(phba,
3291 &phba->sli.ring[LPFC_FCP_RING],
3294 if (phba->cfg_multi_ring_support == 2) {
3296 * Process all events on extra ring. Take the optimized path
3297 * for extra ring IO. Any other IO is slow path and is handled
3298 * by the worker thread.
3300 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
3301 status >>= (4*LPFC_EXTRA_RING);
3302 if (status & HA_RXATT) {
3303 lpfc_sli_handle_fast_ring_event(phba,
3304 &phba->sli.ring[LPFC_EXTRA_RING],
3310 } /* lpfc_intr_handler */
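
/*
 * Illustrative sketch, not part of the original driver: the handler above
 * treats the Host Attention register as one 4-bit attention nibble per
 * ring, which is why a ring's events are isolated with
 * (HA_RXMASK << (4 * ringno)) and shifted back down before testing bits
 * such as HA_RXATT.  The helper below is hypothetical.
 */
#if 0
static inline uint32_t example_ring_attention(uint32_t ha_copy, int ringno)
{
	/* Keep only this ring's nibble, then right-justify it. */
	return (ha_copy & (HA_RXMASK << (4 * ringno))) >> (4 * ringno);
}
#endif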