[SCSI] lpfc: bug fixes
drivers/scsi/lpfc/lpfc_sli.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41 #include "lpfc_debugfs.h"
42
43 /*
44  * Define macro to log: Mailbox command x%x cannot issue Data
45  * This allows multiple uses of lpfc_msgBlk0311
46  * without perturbing the log message utility.
47  */
48 #define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
49                         lpfc_printf_log(phba, \
50                                 KERN_INFO, \
51                                 LOG_MBOX | LOG_SLI, \
52                                 "%d (%d):0311 Mailbox command x%x cannot " \
53                                 "issue Data: x%x x%x x%x\n", \
54                                 phba->brd_no, \
55                                 pmbox->vport ? pmbox->vport->vpi : 0, \
56                                 pmbox->mb.mbxCommand,           \
57                                 phba->pport->port_state,        \
58                                 psli->sli_flag, \
59                                 flag)
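
/*
 * Illustrative use of the macro above (editorial sketch, not driver code);
 * "flag" stands for whatever issue-flag the caller was given:
 *
 *	if (unlikely(phba->link_state == LPFC_HBA_ERROR)) {
 *		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
 *		return MBX_NOT_FINISHED;
 *	}
 */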
60
61
62 /* There are only four IOCB completion types. */
63 typedef enum _lpfc_iocb_type {
64         LPFC_UNKNOWN_IOCB,
65         LPFC_UNSOL_IOCB,
66         LPFC_SOL_IOCB,
67         LPFC_ABORT_IOCB
68 } lpfc_iocb_type;
69
70 /* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
71  * to the start of the ring, and the slot number of the
72  * desired iocb entry, calculate a pointer to that entry.
73  */
74 static inline IOCB_t *
75 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
76 {
77         return (IOCB_t *) (((char *) pring->cmdringaddr) +
78                            pring->cmdidx * phba->iocb_cmd_size);
79 }
80
81 static inline IOCB_t *
82 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
83 {
84         return (IOCB_t *) (((char *) pring->rspringaddr) +
85                            pring->rspidx * phba->iocb_rsp_size);
86 }
87
88 static struct lpfc_iocbq *
89 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
90 {
91         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
92         struct lpfc_iocbq * iocbq = NULL;
93
94         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
95         return iocbq;
96 }
97
98 struct lpfc_iocbq *
99 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
100 {
101         struct lpfc_iocbq * iocbq = NULL;
102         unsigned long iflags;
103
104         spin_lock_irqsave(&phba->hbalock, iflags);
105         iocbq = __lpfc_sli_get_iocbq(phba);
106         spin_unlock_irqrestore(&phba->hbalock, iflags);
107         return iocbq;
108 }
109
110 void
111 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
112 {
113         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
114
115         /*
116          * Clean all volatile data fields, preserve iotag and node struct.
117          */
118         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
119         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
120 }
121
122 void
123 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
124 {
125         unsigned long iflags;
126
127         /*
128          * Clean all volatile data fields, preserve iotag and node struct.
129          */
130         spin_lock_irqsave(&phba->hbalock, iflags);
131         __lpfc_sli_release_iocbq(phba, iocbq);
132         spin_unlock_irqrestore(&phba->hbalock, iflags);
133 }
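
/*
 * Typical pairing of the helpers above (editorial sketch): callers that
 * already hold hbalock use the __ variants, everyone else uses the locking
 * wrappers.
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return IOCB_ERROR;
 *	...build iocbq->iocb and issue it...
 *	lpfc_sli_release_iocbq(phba, iocbq);   on error/teardown paths
 */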
134
135 /*
136  * Translate the iocb command to an iocb command type used to decide the final
137  * disposition of each completed IOCB.
138  */
139 static lpfc_iocb_type
140 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
141 {
142         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
143
144         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
145                 return LPFC_UNKNOWN_IOCB;
146
147         switch (iocb_cmnd) {
148         case CMD_XMIT_SEQUENCE_CR:
149         case CMD_XMIT_SEQUENCE_CX:
150         case CMD_XMIT_BCAST_CN:
151         case CMD_XMIT_BCAST_CX:
152         case CMD_ELS_REQUEST_CR:
153         case CMD_ELS_REQUEST_CX:
154         case CMD_CREATE_XRI_CR:
155         case CMD_CREATE_XRI_CX:
156         case CMD_GET_RPI_CN:
157         case CMD_XMIT_ELS_RSP_CX:
158         case CMD_GET_RPI_CR:
159         case CMD_FCP_IWRITE_CR:
160         case CMD_FCP_IWRITE_CX:
161         case CMD_FCP_IREAD_CR:
162         case CMD_FCP_IREAD_CX:
163         case CMD_FCP_ICMND_CR:
164         case CMD_FCP_ICMND_CX:
165         case CMD_FCP_TSEND_CX:
166         case CMD_FCP_TRSP_CX:
167         case CMD_FCP_TRECEIVE_CX:
168         case CMD_FCP_AUTO_TRSP_CX:
169         case CMD_ADAPTER_MSG:
170         case CMD_ADAPTER_DUMP:
171         case CMD_XMIT_SEQUENCE64_CR:
172         case CMD_XMIT_SEQUENCE64_CX:
173         case CMD_XMIT_BCAST64_CN:
174         case CMD_XMIT_BCAST64_CX:
175         case CMD_ELS_REQUEST64_CR:
176         case CMD_ELS_REQUEST64_CX:
177         case CMD_FCP_IWRITE64_CR:
178         case CMD_FCP_IWRITE64_CX:
179         case CMD_FCP_IREAD64_CR:
180         case CMD_FCP_IREAD64_CX:
181         case CMD_FCP_ICMND64_CR:
182         case CMD_FCP_ICMND64_CX:
183         case CMD_FCP_TSEND64_CX:
184         case CMD_FCP_TRSP64_CX:
185         case CMD_FCP_TRECEIVE64_CX:
186         case CMD_GEN_REQUEST64_CR:
187         case CMD_GEN_REQUEST64_CX:
188         case CMD_XMIT_ELS_RSP64_CX:
189                 type = LPFC_SOL_IOCB;
190                 break;
191         case CMD_ABORT_XRI_CN:
192         case CMD_ABORT_XRI_CX:
193         case CMD_CLOSE_XRI_CN:
194         case CMD_CLOSE_XRI_CX:
195         case CMD_XRI_ABORTED_CX:
196         case CMD_ABORT_MXRI64_CN:
197                 type = LPFC_ABORT_IOCB;
198                 break;
199         case CMD_RCV_SEQUENCE_CX:
200         case CMD_RCV_ELS_REQ_CX:
201         case CMD_RCV_SEQUENCE64_CX:
202         case CMD_RCV_ELS_REQ64_CX:
203         case CMD_IOCB_RCV_SEQ64_CX:
204         case CMD_IOCB_RCV_ELS64_CX:
205         case CMD_IOCB_RCV_CONT64_CX:
206                 type = LPFC_UNSOL_IOCB;
207                 break;
208         default:
209                 type = LPFC_UNKNOWN_IOCB;
210                 break;
211         }
212
213         return type;
214 }
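
/*
 * Editorial note: with the table above, for example,
 * lpfc_sli_iocb_cmd_type(CMD_FCP_IREAD64_CR) is LPFC_SOL_IOCB,
 * lpfc_sli_iocb_cmd_type(CMD_ABORT_XRI_CN) is LPFC_ABORT_IOCB and
 * lpfc_sli_iocb_cmd_type(CMD_RCV_ELS_REQ64_CX) is LPFC_UNSOL_IOCB;
 * anything not listed falls through to LPFC_UNKNOWN_IOCB.
 */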
215
216 static int
217 lpfc_sli_ring_map(struct lpfc_hba *phba)
218 {
219         struct lpfc_sli *psli = &phba->sli;
220         LPFC_MBOXQ_t *pmb;
221         MAILBOX_t *pmbox;
222         int i, rc, ret = 0;
223
224         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225         if (!pmb)
226                 return -ENOMEM;
227         pmbox = &pmb->mb;
228         phba->link_state = LPFC_INIT_MBX_CMDS;
229         for (i = 0; i < psli->num_rings; i++) {
230                 lpfc_config_ring(phba, i, pmb);
231                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
232                 if (rc != MBX_SUCCESS) {
233                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
234                                         "%d:0446 Adapter failed to init (%d), "
235                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
236                                         "ring %d\n",
237                                         phba->brd_no, rc,
238                                         pmbox->mbxCommand,
239                                         pmbox->mbxStatus,
240                                         i);
241                         phba->link_state = LPFC_HBA_ERROR;
242                         ret = -ENXIO;
243                         break;
244                 }
245         }
246         mempool_free(pmb, phba->mbox_mem_pool);
247         return ret;
248 }
249
250 static int
251 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
252                         struct lpfc_iocbq *piocb)
253 {
254         list_add_tail(&piocb->list, &pring->txcmplq);
255         pring->txcmplq_cnt++;
256         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
257            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
258            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
259                 if (!piocb->vport)
260                         BUG();
261                 else
262                         mod_timer(&piocb->vport->els_tmofunc,
263                                   jiffies + HZ * (phba->fc_ratov << 1));
264         }
265
266
267         return 0;
268 }
269
270 static struct lpfc_iocbq *
271 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
272 {
273         struct lpfc_iocbq *cmd_iocb;
274
275         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
276         if (cmd_iocb != NULL)
277                 pring->txq_cnt--;
278         return cmd_iocb;
279 }
280
281 static IOCB_t *
282 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
283 {
284         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
285                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
286                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
287         uint32_t  max_cmd_idx = pring->numCiocb;
288
289         if ((pring->next_cmdidx == pring->cmdidx) &&
290            (++pring->next_cmdidx >= max_cmd_idx))
291                 pring->next_cmdidx = 0;
292
293         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
294
295                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
296
297                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
298                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
299                                         "%d:0315 Ring %d issue: portCmdGet %d "
300                                         "is bigger then cmd ring %d\n",
301                                         phba->brd_no, pring->ringno,
302                                         pring->local_getidx, max_cmd_idx);
303
304                         phba->link_state = LPFC_HBA_ERROR;
305                         /*
306                          * All error attention handlers are posted to
307                          * worker thread
308                          */
309                         phba->work_ha |= HA_ERATT;
310                         phba->work_hs = HS_FFER3;
311
312                         /* hbalock should already be held */
313                         if (phba->work_wait)
314                                 lpfc_worker_wake_up(phba);
315
316                         return NULL;
317                 }
318
319                 if (pring->local_getidx == pring->next_cmdidx)
320                         return NULL;
321         }
322
323         return lpfc_cmd_iocb(phba, pring);
324 }
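
/*
 * Editorial example of the slot arithmetic above: on a ring with
 * numCiocb == 8, cmdidx == next_cmdidx == 7 bumps next_cmdidx to 8, which
 * wraps to 0.  If the port's cmdGetInx (refreshed into local_getidx) still
 * equals that slot, the ring is full and NULL is returned; otherwise the
 * caller gets a pointer to command slot cmdidx.
 */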
325
326 uint16_t
327 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
328 {
329         struct lpfc_iocbq **new_arr;
330         struct lpfc_iocbq **old_arr;
331         size_t new_len;
332         struct lpfc_sli *psli = &phba->sli;
333         uint16_t iotag;
334
335         spin_lock_irq(&phba->hbalock);
336         iotag = psli->last_iotag;
337         if (++iotag < psli->iocbq_lookup_len) {
338                 psli->last_iotag = iotag;
339                 psli->iocbq_lookup[iotag] = iocbq;
340                 spin_unlock_irq(&phba->hbalock);
341                 iocbq->iotag = iotag;
342                 return iotag;
343         } else if (psli->iocbq_lookup_len < (0xffff
344                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
345                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
346                 spin_unlock_irq(&phba->hbalock);
347                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
348                                   GFP_KERNEL);
349                 if (new_arr) {
350                         spin_lock_irq(&phba->hbalock);
351                         old_arr = psli->iocbq_lookup;
352                         if (new_len <= psli->iocbq_lookup_len) {
353                                 /* highly improbable case */
354                                 kfree(new_arr);
355                                 iotag = psli->last_iotag;
356                                 if (++iotag < psli->iocbq_lookup_len) {
357                                         psli->last_iotag = iotag;
358                                         psli->iocbq_lookup[iotag] = iocbq;
359                                         spin_unlock_irq(&phba->hbalock);
360                                         iocbq->iotag = iotag;
361                                         return iotag;
362                                 }
363                                 spin_unlock_irq(&phba->hbalock);
364                                 return 0;
365                         }
366                         if (psli->iocbq_lookup)
367                                 memcpy(new_arr, old_arr,
368                                        ((psli->last_iotag  + 1) *
369                                         sizeof (struct lpfc_iocbq *)));
370                         psli->iocbq_lookup = new_arr;
371                         psli->iocbq_lookup_len = new_len;
372                         psli->last_iotag = iotag;
373                         psli->iocbq_lookup[iotag] = iocbq;
374                         spin_unlock_irq(&phba->hbalock);
375                         iocbq->iotag = iotag;
376                         kfree(old_arr);
377                         return iotag;
378                 }
379         } else
380                 spin_unlock_irq(&phba->hbalock);
381
382         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
383                         "%d:0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
384                         phba->brd_no, psli->last_iotag);
385
386         return 0;
387 }
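
/*
 * Editorial sketch of the growth path above, assuming
 * LPFC_IOCBQ_LOOKUP_INCREMENT is 1024: a lookup table of length 1024 whose
 * last_iotag is 1023 is replaced by a 2048-entry table, the old entries are
 * copied across, iotag 1024 is handed out, and the old table is freed.
 * Growth stops once the table would no longer fit in the 16-bit iotag space.
 */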
388
389 static void
390 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
391                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
392 {
393         /*
394          * Set up an iotag
395          */
396         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
397
398         /*
399          * Issue iocb command to adapter
400          */
401         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
402         wmb();
403         pring->stats.iocb_cmd++;
404
405         /*
406          * If there is no completion routine to call, we can release the
407          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
408          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
409          */
410         if (nextiocb->iocb_cmpl)
411                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
412         else
413                 __lpfc_sli_release_iocbq(phba, nextiocb);
414
415         /*
416          * Let the HBA know what IOCB slot will be the next one the
417          * driver will put a command into.
418          */
419         pring->cmdidx = pring->next_cmdidx;
420         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
421 }
422
423 static void
424 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
425 {
426         int ringno = pring->ringno;
427
428         pring->flag |= LPFC_CALL_RING_AVAILABLE;
429
430         wmb();
431
432         /*
433          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
434          * The HBA will tell us when an IOCB entry is available.
435          */
436         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
437         readl(phba->CAregaddr); /* flush */
438
439         pring->stats.iocb_cmd_full++;
440 }
441
442 static void
443 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
444 {
445         int ringno = pring->ringno;
446
447         /*
448          * Tell the HBA that there is work to do in this ring.
449          */
450         wmb();
451         writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
452         readl(phba->CAregaddr); /* flush */
453 }
454
455 static void
456 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
457 {
458         IOCB_t *iocb;
459         struct lpfc_iocbq *nextiocb;
460
461         /*
462          * Check to see if:
463          *  (a) there is anything on the txq to send
464          *  (b) link is up
465          *  (c) link attention events can be processed (fcp ring only)
466          *  (d) IOCB processing is not blocked by the outstanding mbox command.
467          */
468         if (pring->txq_cnt &&
469             lpfc_is_link_up(phba) &&
470             (pring->ringno != phba->sli.fcp_ring ||
471              phba->sli.sli_flag & LPFC_PROCESS_LA) &&
472             !(pring->flag & LPFC_STOP_IOCB_MBX)) {
473
474                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
475                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
476                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
477
478                 if (iocb)
479                         lpfc_sli_update_ring(phba, pring);
480                 else
481                         lpfc_sli_update_full_ring(phba, pring);
482         }
483
484         return;
485 }
486
487 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
488 static void
489 lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
490 {
491         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
492                 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
493                 &phba->slim2p->mbx.us.s2.port[ringno];
494         unsigned long iflags;
495
496         /* If the ring is active, flag it */
497         spin_lock_irqsave(&phba->hbalock, iflags);
498         if (phba->sli.ring[ringno].cmdringaddr) {
499                 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
500                         phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
501                         /*
502                          * Force update of the local copy of cmdGetInx
503                          */
504                         phba->sli.ring[ringno].local_getidx
505                                 = le32_to_cpu(pgp->cmdGetInx);
506                         lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
507                 }
508         }
509         spin_unlock_irqrestore(&phba->hbalock, iflags);
510 }
511
512 struct lpfc_hbq_entry *
513 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
514 {
515         struct hbq_s *hbqp = &phba->hbqs[hbqno];
516
517         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
518             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
519                 hbqp->next_hbqPutIdx = 0;
520
521         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
522                 uint32_t raw_index = phba->hbq_get[hbqno];
523                 uint32_t getidx = le32_to_cpu(raw_index);
524
525                 hbqp->local_hbqGetIdx = getidx;
526
527                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
528                         lpfc_printf_log(phba, KERN_ERR,
529                                         LOG_SLI | LOG_VPORT,
530                                         "%d:1802 HBQ %d: local_hbqGetIdx "
531                                         "%u is >= hbqp->entry_count %u\n",
532                                         phba->brd_no, hbqno,
533                                         hbqp->local_hbqGetIdx,
534                                         hbqp->entry_count);
535
536                         phba->link_state = LPFC_HBA_ERROR;
537                         return NULL;
538                 }
539
540                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
541                         return NULL;
542         }
543
544         return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
545 }
546
547 void
548 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
549 {
550         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
551         struct hbq_dmabuf *hbq_buf;
552
553         /* Return all memory used by all HBQs */
554         list_for_each_entry_safe(dmabuf, next_dmabuf,
555                                  &phba->hbq_buffer_list, list) {
556                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
557                 list_del(&hbq_buf->dbuf.list);
558                 lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
559                 kfree(hbq_buf);
560         }
561 }
562
563 static void
564 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
565                          struct hbq_dmabuf *hbq_buf)
566 {
567         struct lpfc_hbq_entry *hbqe;
568         dma_addr_t physaddr = hbq_buf->dbuf.phys;
569
570         /* Get next HBQ entry slot to use */
571         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
572         if (hbqe) {
573                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
574
575                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
576                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
577                 hbqe->bde.tus.f.bdeSize = FCELSSIZE;
578                 hbqe->bde.tus.f.bdeFlags = 0;
579                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
580                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
581                                 /* Sync SLIM */
582                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
583                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
584                                 /* flush */
585                 readl(phba->hbq_put + hbqno);
586                 list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
587         }
588 }
589
590 static struct lpfc_hbq_init lpfc_els_hbq = {
591         .rn = 1,
592         .entry_count = 200,
593         .mask_count = 0,
594         .profile = 0,
595         .ring_mask = 1 << LPFC_ELS_RING,
596         .buffer_count = 0,
597         .init_count = 20,
598         .add_count = 5,
599 };
600
601 static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
602         &lpfc_els_hbq,
603 };
604
605 int
606 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
607 {
608         uint32_t i, start, end;
609         struct hbq_dmabuf *hbq_buffer;
610
611         start = lpfc_hbq_defs[hbqno]->buffer_count;
612         end = count + lpfc_hbq_defs[hbqno]->buffer_count;
613         if (end > lpfc_hbq_defs[hbqno]->entry_count) {
614                 end = lpfc_hbq_defs[hbqno]->entry_count;
615         }
616
617         /* Populate HBQ entries */
618         for (i = start; i < end; i++) {
619                 hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
620                                      GFP_KERNEL);
621                 if (!hbq_buffer)
622                         return 1;
623                 hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
624                                                         &hbq_buffer->dbuf.phys);
625                 if (hbq_buffer->dbuf.virt == NULL)
626                         return 1;
627                 hbq_buffer->tag = (i | (hbqno << 16));
628                 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
629                 lpfc_hbq_defs[hbqno]->buffer_count++;
630         }
631         return 0;
632 }
633
634 int
635 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
636 {
637         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
638                                          lpfc_hbq_defs[qno]->add_count));
639 }
640
641 int
642 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
643 {
644         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
645                                          lpfc_hbq_defs[qno]->init_count));
646 }
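
/*
 * Editorial note: with the lpfc_els_hbq defaults above,
 * lpfc_sli_hbqbuf_init_hbqs(phba, 0) posts the first init_count (20)
 * buffers to the ELS HBQ, and lpfc_sli_hbqbuf_add_hbqs(phba, 0) tops it up
 * by add_count (5) at a time, never filling past entry_count (200).
 */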
647
648 struct hbq_dmabuf *
649 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
650 {
651         struct lpfc_dmabuf *d_buf;
652         struct hbq_dmabuf *hbq_buf;
653
654         list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
655                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
656                 if ((hbq_buf->tag & 0xffff) == tag) {
657                         return hbq_buf;
658                 }
659         }
660         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
661                         "%d:1803 Bad hbq tag. Data: x%x x%x\n",
662                         phba->brd_no, tag,
663                         lpfc_hbq_defs[tag >> 16]->buffer_count);
664         return NULL;
665 }
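
/*
 * Editorial note on the tag layout searched above:
 * lpfc_sli_hbqbuf_fill_hbqs() builds each tag as (index | (hbqno << 16)),
 * so the low 16 bits identify the buffer within its HBQ and the high 16
 * bits name the HBQ itself (used here only for the error message).
 */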
666
667 void
668 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
669 {
670         uint32_t hbqno;
671
672         if (sp) {
673                 hbqno = sp->tag >> 16;
674                 lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
675         }
676 }
677
678 static int
679 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
680 {
681         uint8_t ret;
682
683         switch (mbxCommand) {
684         case MBX_LOAD_SM:
685         case MBX_READ_NV:
686         case MBX_WRITE_NV:
687         case MBX_RUN_BIU_DIAG:
688         case MBX_INIT_LINK:
689         case MBX_DOWN_LINK:
690         case MBX_CONFIG_LINK:
691         case MBX_CONFIG_RING:
692         case MBX_RESET_RING:
693         case MBX_READ_CONFIG:
694         case MBX_READ_RCONFIG:
695         case MBX_READ_SPARM:
696         case MBX_READ_STATUS:
697         case MBX_READ_RPI:
698         case MBX_READ_XRI:
699         case MBX_READ_REV:
700         case MBX_READ_LNK_STAT:
701         case MBX_REG_LOGIN:
702         case MBX_UNREG_LOGIN:
703         case MBX_READ_LA:
704         case MBX_CLEAR_LA:
705         case MBX_DUMP_MEMORY:
706         case MBX_DUMP_CONTEXT:
707         case MBX_RUN_DIAGS:
708         case MBX_RESTART:
709         case MBX_UPDATE_CFG:
710         case MBX_DOWN_LOAD:
711         case MBX_DEL_LD_ENTRY:
712         case MBX_RUN_PROGRAM:
713         case MBX_SET_MASK:
714         case MBX_SET_SLIM:
715         case MBX_UNREG_D_ID:
716         case MBX_KILL_BOARD:
717         case MBX_CONFIG_FARP:
718         case MBX_BEACON:
719         case MBX_LOAD_AREA:
720         case MBX_RUN_BIU_DIAG64:
721         case MBX_CONFIG_PORT:
722         case MBX_READ_SPARM64:
723         case MBX_READ_RPI64:
724         case MBX_REG_LOGIN64:
725         case MBX_READ_LA64:
726         case MBX_FLASH_WR_ULA:
727         case MBX_SET_DEBUG:
728         case MBX_LOAD_EXP_ROM:
729         case MBX_REG_VPI:
730         case MBX_UNREG_VPI:
731         case MBX_HEARTBEAT:
732                 ret = mbxCommand;
733                 break;
734         default:
735                 ret = MBX_SHUTDOWN;
736                 break;
737         }
738         return ret;
739 }
740 static void
741 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
742 {
743         wait_queue_head_t *pdone_q;
744         unsigned long drvr_flag;
745
746         /*
747          * If pdone_q is empty, the driver thread gave up waiting and
748          * continued running.
749          */
750         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
751         spin_lock_irqsave(&phba->hbalock, drvr_flag);
752         pdone_q = (wait_queue_head_t *) pmboxq->context1;
753         if (pdone_q)
754                 wake_up_interruptible(pdone_q);
755         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
756         return;
757 }
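
/*
 * Editorial sketch of the waiting side (outside this hunk): the issuer of a
 * synchronous mailbox command is assumed to park a wait_queue_head_t in
 * pmboxq->context1 and sleep on it; the completion above sets LPFC_MBX_WAKE
 * and wakes that queue.  A NULL context1 therefore means the waiter already
 * timed out and moved on, and the wake-up is skipped.
 */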
758
759 void
760 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
761 {
762         struct lpfc_dmabuf *mp;
763         uint16_t rpi;
764         int rc;
765
766         mp = (struct lpfc_dmabuf *) (pmb->context1);
767
768         if (mp) {
769                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
770                 kfree(mp);
771         }
772
773         /*
774          * If a REG_LOGIN succeeded after the node is destroyed or the node
775          * is in re-discovery, the driver needs to clean up the RPI.
776          */
777         if (!(phba->pport->load_flag & FC_UNLOADING) &&
778             pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
779             !pmb->mb.mbxStatus) {
780
781                 rpi = pmb->mb.un.varWords[0];
782                 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
783                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
784                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
785                 if (rc != MBX_NOT_FINISHED)
786                         return;
787         }
788
789         mempool_free(pmb, phba->mbox_mem_pool);
790         return;
791 }
792
793 int
794 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
795 {
796         MAILBOX_t *pmbox;
797         LPFC_MBOXQ_t *pmb;
798         int rc;
799         LIST_HEAD(cmplq);
800
801         phba->sli.slistat.mbox_event++;
802
803         /* Get all completed mailbox buffers into the cmplq */
804         spin_lock_irq(&phba->hbalock);
805         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
806         spin_unlock_irq(&phba->hbalock);
807
808         /* Get a Mailbox buffer to setup mailbox commands for callback */
809         do {
810                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
811                 if (pmb == NULL)
812                         break;
813
814                 pmbox = &pmb->mb;
815
816                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
817                         if (pmb->vport) {
818                                 lpfc_debugfs_disc_trc(pmb->vport,
819                                         LPFC_DISC_TRC_MBOX_VPORT,
820                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
821                                         (uint32_t)pmbox->mbxCommand,
822                                         pmbox->un.varWords[0],
823                                         pmbox->un.varWords[1]);
824                         }
825                         else {
826                                 lpfc_debugfs_disc_trc(phba->pport,
827                                         LPFC_DISC_TRC_MBOX,
828                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
829                                         (uint32_t)pmbox->mbxCommand,
830                                         pmbox->un.varWords[0],
831                                         pmbox->un.varWords[1]);
832                         }
833                 }
834
835                 /*
836                  * It is a fatal error if an unknown mbox command completes.
837                  */
838                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
839                     MBX_SHUTDOWN) {
840
841                         /* Unknown mailbox command completion */
842                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
843                                         "%d (%d):0323 Unknown Mailbox command "
844                                         "%x Cmpl\n",
845                                         phba->brd_no,
846                                         pmb->vport ? pmb->vport->vpi : 0,
847                                         pmbox->mbxCommand);
848                         phba->link_state = LPFC_HBA_ERROR;
849                         phba->work_hs = HS_FFER3;
850                         lpfc_handle_eratt(phba);
851                         continue;
852                 }
853
854                 if (pmbox->mbxStatus) {
855                         phba->sli.slistat.mbox_stat_err++;
856                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
857                                 /* Mbox cmd cmpl error - RETRYing */
858                                 lpfc_printf_log(phba, KERN_INFO,
859                                                 LOG_MBOX | LOG_SLI,
860                                                 "%d (%d):0305 Mbox cmd cmpl "
861                                                 "error - RETRYing Data: x%x "
862                                                 "x%x x%x x%x\n",
863                                                 phba->brd_no,
864                                                 pmb->vport ? pmb->vport->vpi : 0,
865                                                 pmbox->mbxCommand,
866                                                 pmbox->mbxStatus,
867                                                 pmbox->un.varWords[0],
868                                                 pmb->vport ? pmb->vport->port_state : 0);
869                                 pmbox->mbxStatus = 0;
870                                 pmbox->mbxOwner = OWN_HOST;
871                                 spin_lock_irq(&phba->hbalock);
872                                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
873                                 spin_unlock_irq(&phba->hbalock);
874                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
875                                 if (rc == MBX_SUCCESS)
876                                         continue;
877                         }
878                 }
879
880                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
881                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
882                                 "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
883                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
884                                 phba->brd_no,
885                                 pmb->vport ? pmb->vport->vpi : 0,
886                                 pmbox->mbxCommand,
887                                 pmb->mbox_cmpl,
888                                 *((uint32_t *) pmbox),
889                                 pmbox->un.varWords[0],
890                                 pmbox->un.varWords[1],
891                                 pmbox->un.varWords[2],
892                                 pmbox->un.varWords[3],
893                                 pmbox->un.varWords[4],
894                                 pmbox->un.varWords[5],
895                                 pmbox->un.varWords[6],
896                                 pmbox->un.varWords[7]);
897
898                 if (pmb->mbox_cmpl)
899                         pmb->mbox_cmpl(phba, pmb);
900         } while (1);
901         return 0;
902 }
903
904 static struct lpfc_dmabuf *
905 lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
906 {
907         struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
908
909         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
910         if (hbq_entry == NULL)
911                 return NULL;
912         list_del(&hbq_entry->dbuf.list);
913         new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
914         if (new_hbq_entry == NULL)
915                 return &hbq_entry->dbuf;
916         new_hbq_entry->dbuf = hbq_entry->dbuf;
917         new_hbq_entry->tag = -1;
918         hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
919         if (hbq_entry->dbuf.virt == NULL) {
920                 kfree(new_hbq_entry);
921                 return &hbq_entry->dbuf;
922         }
923         lpfc_sli_free_hbq(phba, hbq_entry);
924         return &new_hbq_entry->dbuf;
925 }
926
927 static int
928 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
929                             struct lpfc_iocbq *saveq)
930 {
931         IOCB_t           * irsp;
932         WORD5            * w5p;
933         uint32_t           Rctl, Type;
934         uint32_t           match, i;
935
936         match = 0;
937         irsp = &(saveq->iocb);
938         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
939             || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
940             || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
941             || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
942                 Rctl = FC_ELS_REQ;
943                 Type = FC_ELS_DATA;
944         } else {
945                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
948                 Rctl = w5p->hcsw.Rctl;
949                 Type = w5p->hcsw.Type;
950
951                 /* Firmware Workaround */
952                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
953                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
954                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
955                         Rctl = FC_ELS_REQ;
956                         Type = FC_ELS_DATA;
957                         w5p->hcsw.Rctl = Rctl;
958                         w5p->hcsw.Type = Type;
959                 }
960         }
961
962         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
963                 if (irsp->ulpBdeCount != 0)
964                         saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
965                                                 irsp->un.ulpWord[3]);
966                 if (irsp->ulpBdeCount == 2)
967                         saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
968                                                 irsp->un.ulpWord[15]);
969         }
970
971         /* Unsolicited responses */
972         if (pring->prt[0].profile) {
973                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
974                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
975                                                                         saveq);
976                 match = 1;
977         } else {
978                 /* We must search, based on rctl / type
979                    for the right routine */
980                 for (i = 0; i < pring->num_mask; i++) {
981                         if ((pring->prt[i].rctl == Rctl) &&
982                             (pring->prt[i].type == Type)) {
986                                 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
987                                         (pring->prt[i].lpfc_sli_rcv_unsol_event)
988                                                         (phba, pring, saveq);
989                                 match = 1;
990                                 break;
991                         }
992                 }
993         }
994         if (match == 0) {
995                 /* Unexpected Rctl / Type received */
996                 /* Ring <ringno> handler: unexpected
997                    Rctl <Rctl> Type <Type> received */
998                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
999                                 "%d:0313 Ring %d handler: unexpected Rctl x%x "
1000                                 "Type x%x received\n",
1001                                 phba->brd_no,
1002                                 pring->ringno,
1003                                 Rctl,
1004                                 Type);
1005         }
1006         return 1;
1007 }
1008
1009 static struct lpfc_iocbq *
1010 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1011                       struct lpfc_sli_ring *pring,
1012                       struct lpfc_iocbq *prspiocb)
1013 {
1014         struct lpfc_iocbq *cmd_iocb = NULL;
1015         uint16_t iotag;
1016
1017         iotag = prspiocb->iocb.ulpIoTag;
1018
1019         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1020                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1021                 list_del_init(&cmd_iocb->list);
1022                 pring->txcmplq_cnt--;
1023                 return cmd_iocb;
1024         }
1025
1026         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1027                         "%d:0317 iotag x%x is out of "
1028                         "range: max iotag x%x wd0 x%x\n",
1029                         phba->brd_no, iotag,
1030                         phba->sli.last_iotag,
1031                         *(((uint32_t *) &prspiocb->iocb) + 7));
1032         return NULL;
1033 }
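
/*
 * Editorial sketch: a response whose ulpIoTag is, say, 42 resolves its
 * originating command in O(1) as phba->sli.iocbq_lookup[42] (valid while
 * 0 < 42 <= last_iotag); the command IOCB is unlinked from the txcmplq
 * before being returned to the caller.
 */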
1034
1035 static int
1036 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1037                           struct lpfc_iocbq *saveq)
1038 {
1039         struct lpfc_iocbq *cmdiocbp;
1040         int rc = 1;
1041         unsigned long iflag;
1042
1043         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
1044         spin_lock_irqsave(&phba->hbalock, iflag);
1045         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1046         spin_unlock_irqrestore(&phba->hbalock, iflag);
1047
1048         if (cmdiocbp) {
1049                 if (cmdiocbp->iocb_cmpl) {
1050                         /*
1051                          * Post all ELS completions to the worker thread.
1052                          * All others are passed to the completion callback.
1053                          */
1054                         if (pring->ringno == LPFC_ELS_RING) {
1055                                 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
1056                                         cmdiocbp->iocb_flag &=
1057                                                 ~LPFC_DRIVER_ABORTED;
1058                                         saveq->iocb.ulpStatus =
1059                                                 IOSTAT_LOCAL_REJECT;
1060                                         saveq->iocb.un.ulpWord[4] =
1061                                                 IOERR_SLI_ABORTED;
1062                                 }
1063                         }
1064                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
1065                 } else
1066                         lpfc_sli_release_iocbq(phba, cmdiocbp);
1067         } else {
1068                 /*
1069                  * Unknown initiating command based on the response iotag.
1070                  * This could be the case on the ELS ring because of
1071                  * lpfc_els_abort().
1072                  */
1073                 if (pring->ringno != LPFC_ELS_RING) {
1074                         /*
1075                          * Ring <ringno> handler: unexpected completion IoTag
1076                          * <IoTag>
1077                          */
1078                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1079                                         "%d (%d):0322 Ring %d handler: "
1080                                         "unexpected completion IoTag x%x "
1081                                         "Data: x%x x%x x%x x%x\n",
1082                                         phba->brd_no,
1083                                         0, /* no cmdiocbp here */
1084                                         pring->ringno,
1085                                         saveq->iocb.ulpIoTag,
1086                                         saveq->iocb.ulpStatus,
1087                                         saveq->iocb.un.ulpWord[4],
1088                                         saveq->iocb.ulpCommand,
1089                                         saveq->iocb.ulpContext);
1090                 }
1091         }
1092
1093         return rc;
1094 }
1095
1096 static void
1097 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1098 {
1099         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1100                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1101                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1102         /*
1103          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1104          * rsp ring <portRspMax>
1105          */
1106         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1107                         "%d:0312 Ring %d handler: portRspPut %d "
1108                         "is bigger than rsp ring %d\n",
1109                         phba->brd_no, pring->ringno,
1110                         le32_to_cpu(pgp->rspPutInx),
1111                         pring->numRiocb);
1112
1113         phba->link_state = LPFC_HBA_ERROR;
1114
1115         /*
1116          * All error attention handlers are posted to
1117          * worker thread
1118          */
1119         phba->work_ha |= HA_ERATT;
1120         phba->work_hs = HS_FFER3;
1121
1122         /* hbalock should already be held */
1123         if (phba->work_wait)
1124                 lpfc_worker_wake_up(phba);
1125
1126         return;
1127 }
1128
1129 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1130 {
1131         struct lpfc_sli      *psli  = &phba->sli;
1132         struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1133         IOCB_t *irsp = NULL;
1134         IOCB_t *entry = NULL;
1135         struct lpfc_iocbq *cmdiocbq = NULL;
1136         struct lpfc_iocbq rspiocbq;
1137         struct lpfc_pgp *pgp;
1138         uint32_t status;
1139         uint32_t portRspPut, portRspMax;
1140         int type;
1141         uint32_t rsp_cmpl = 0;
1142         uint32_t ha_copy;
1143         unsigned long iflags;
1144
1145         pring->stats.iocb_event++;
1146
1147         pgp = (phba->sli_rev == 3) ?
1148                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1149                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1150
1151
1152         /*
1153          * The next available response entry should never exceed the maximum
1154          * entries.  If it does, treat it as an adapter hardware error.
1155          */
1156         portRspMax = pring->numRiocb;
1157         portRspPut = le32_to_cpu(pgp->rspPutInx);
1158         if (unlikely(portRspPut >= portRspMax)) {
1159                 lpfc_sli_rsp_pointers_error(phba, pring);
1160                 return;
1161         }
1162
1163         rmb();
1164         while (pring->rspidx != portRspPut) {
1165                 entry = lpfc_resp_iocb(phba, pring);
1166                 if (++pring->rspidx >= portRspMax)
1167                         pring->rspidx = 0;
1168
1169                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1170                                       (uint32_t *) &rspiocbq.iocb,
1171                                       phba->iocb_rsp_size);
1172                 irsp = &rspiocbq.iocb;
1173                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1174                 pring->stats.iocb_rsp++;
1175                 rsp_cmpl++;
1176
1177                 if (unlikely(irsp->ulpStatus)) {
1178                         /* Rsp ring <ringno> error: IOCB */
1179                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1180                                         "%d:0326 Rsp Ring %d error: IOCB Data: "
1181                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1182                                         phba->brd_no, pring->ringno,
1183                                         irsp->un.ulpWord[0],
1184                                         irsp->un.ulpWord[1],
1185                                         irsp->un.ulpWord[2],
1186                                         irsp->un.ulpWord[3],
1187                                         irsp->un.ulpWord[4],
1188                                         irsp->un.ulpWord[5],
1189                                         *(((uint32_t *) irsp) + 6),
1190                                         *(((uint32_t *) irsp) + 7));
1191                 }
1192
1193                 switch (type) {
1194                 case LPFC_ABORT_IOCB:
1195                 case LPFC_SOL_IOCB:
1196                         /*
1197                          * Idle exchange closed via ABTS from port.  No iocb
1198                          * resources need to be recovered.
1199                          */
1200                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1201                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1202                                                 "%d:0314 IOCB cmd 0x%x"
1203                                                 " processed. Skipping"
1204                                                 " completion\n", phba->brd_no,
1205                                                 irsp->ulpCommand);
1206                                 break;
1207                         }
1208
1209                         spin_lock_irqsave(&phba->hbalock, iflags);
1210                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1211                                                          &rspiocbq);
1212                         spin_unlock_irqrestore(&phba->hbalock, iflags);
1213                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1214                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1215                                                       &rspiocbq);
1216                         }
1217                         break;
1218                 default:
1219                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1220                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1221                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1222                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1223                                        MAX_MSG_DATA);
1224                                 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1225                                          phba->brd_no, adaptermsg);
1226                         } else {
1227                                 /* Unknown IOCB command */
1228                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1229                                                 "%d:0321 Unknown IOCB command "
1230                                                 "Data: x%x, x%x x%x x%x x%x\n",
1231                                                 phba->brd_no, type,
1232                                                 irsp->ulpCommand,
1233                                                 irsp->ulpStatus,
1234                                                 irsp->ulpIoTag,
1235                                                 irsp->ulpContext);
1236                         }
1237                         break;
1238                 }
1239
1240                 /*
1241                  * The response IOCB has been processed.  Update the ring
1242                  * pointer in SLIM.  If the port response put pointer has not
1243                  * been updated, sync the pgp->rspPutInx and fetch the new port
1244                  * response put pointer.
1245                  */
1246                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1247
1248                 if (pring->rspidx == portRspPut)
1249                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1250         }
1251
1252         ha_copy = readl(phba->HAregaddr);
1253         ha_copy >>= (LPFC_FCP_RING * 4);
1254
1255         if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1256                 spin_lock_irqsave(&phba->hbalock, iflags);
1257                 pring->stats.iocb_rsp_full++;
1258                 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1259                 writel(status, phba->CAregaddr);
1260                 readl(phba->CAregaddr);
1261                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1262         }
1263         if ((ha_copy & HA_R0CE_RSP) &&
1264             (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1265                 spin_lock_irqsave(&phba->hbalock, iflags);
1266                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1267                 pring->stats.iocb_cmd_empty++;
1268
1269                 /* Force update of the local copy of cmdGetInx */
1270                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1271                 lpfc_sli_resume_iocb(phba, pring);
1272
1273                 if ((pring->lpfc_sli_cmd_available))
1274                         (pring->lpfc_sli_cmd_available) (phba, pring);
1275
1276                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1277         }
1278
1279         return;
1280 }
1281
1282 /*
1283  * This routine presumes LPFC_FCP_RING handling and doesn't bother
1284  * to check it explicitly.
1285  */
1286 static int
1287 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1288                                 struct lpfc_sli_ring *pring, uint32_t mask)
1289 {
1290         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1291                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1292                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1293         IOCB_t *irsp = NULL;
1294         IOCB_t *entry = NULL;
1295         struct lpfc_iocbq *cmdiocbq = NULL;
1296         struct lpfc_iocbq rspiocbq;
1297         uint32_t status;
1298         uint32_t portRspPut, portRspMax;
1299         int rc = 1;
1300         lpfc_iocb_type type;
1301         unsigned long iflag;
1302         uint32_t rsp_cmpl = 0;
1303
1304         spin_lock_irqsave(&phba->hbalock, iflag);
1305         pring->stats.iocb_event++;
1306
1307         /*
1308          * The next available response entry should never exceed the maximum
1309          * entries.  If it does, treat it as an adapter hardware error.
1310          */
1311         portRspMax = pring->numRiocb;
1312         portRspPut = le32_to_cpu(pgp->rspPutInx);
1313         if (unlikely(portRspPut >= portRspMax)) {
1314                 lpfc_sli_rsp_pointers_error(phba, pring);
1315                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1316                 return 1;
1317         }
1318
1319         rmb();
1320         while (pring->rspidx != portRspPut) {
1321                 /*
1322                  * Fetch an entry off the ring and copy it into a local data
1323                  * structure.  The copy involves a byte-swap since the
1324                  * network byte order and pci byte orders are different.
1325                  */
1326                 entry = lpfc_resp_iocb(phba, pring);
1327                 phba->last_completion_time = jiffies;
1328
1329                 if (++pring->rspidx >= portRspMax)
1330                         pring->rspidx = 0;
1331
1332                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1333                                       (uint32_t *) &rspiocbq.iocb,
1334                                       phba->iocb_rsp_size);
1335                 INIT_LIST_HEAD(&(rspiocbq.list));
1336                 irsp = &rspiocbq.iocb;
1337
1338                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1339                 pring->stats.iocb_rsp++;
1340                 rsp_cmpl++;
1341
1342                 if (unlikely(irsp->ulpStatus)) {
1343                         /*
1344                          * If resource errors reported from HBA, reduce
1345                          * queuedepths of the SCSI device.
1346                          */
1347                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1348                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1349                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1350                                 lpfc_adjust_queue_depth(phba);
1351                                 spin_lock_irqsave(&phba->hbalock, iflag);
1352                         }
1353
1354                         /* Rsp ring <ringno> error: IOCB */
1355                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1356                                         "%d:0336 Rsp Ring %d error: IOCB Data: "
1357                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1358                                         phba->brd_no, pring->ringno,
1359                                         irsp->un.ulpWord[0],
1360                                         irsp->un.ulpWord[1],
1361                                         irsp->un.ulpWord[2],
1362                                         irsp->un.ulpWord[3],
1363                                         irsp->un.ulpWord[4],
1364                                         irsp->un.ulpWord[5],
1365                                         *(((uint32_t *) irsp) + 6),
1366                                         *(((uint32_t *) irsp) + 7));
1367                 }
1368
1369                 switch (type) {
1370                 case LPFC_ABORT_IOCB:
1371                 case LPFC_SOL_IOCB:
1372                         /*
1373                          * Idle exchange closed via ABTS from port.  No iocb
1374                          * resources need to be recovered.
1375                          */
1376                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1377                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1378                                                 "%d:0333 IOCB cmd 0x%x"
1379                                                 " processed. Skipping"
1380                                                 " completion\n",
1381                                                 phba->brd_no,
1382                                                 irsp->ulpCommand);
1383                                 break;
1384                         }
1385
1386                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1387                                                          &rspiocbq);
1388                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1389                                 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1390                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1391                                                               &rspiocbq);
1392                                 } else {
1393                                         spin_unlock_irqrestore(&phba->hbalock,
1394                                                                iflag);
1395                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1396                                                               &rspiocbq);
1397                                         spin_lock_irqsave(&phba->hbalock,
1398                                                           iflag);
1399                                 }
1400                         }
1401                         break;
1402                 case LPFC_UNSOL_IOCB:
1403                         spin_unlock_irqrestore(&phba->hbalock, iflag);
1404                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1405                         spin_lock_irqsave(&phba->hbalock, iflag);
1406                         break;
1407                 default:
1408                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1409                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1410                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1411                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1412                                        MAX_MSG_DATA);
1413                                 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1414                                          phba->brd_no, adaptermsg);
1415                         } else {
1416                                 /* Unknown IOCB command */
1417                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1418                                                 "%d:0334 Unknown IOCB command "
1419                                                 "Data: x%x, x%x x%x x%x x%x\n",
1420                                                 phba->brd_no, type,
1421                                                 irsp->ulpCommand,
1422                                                 irsp->ulpStatus,
1423                                                 irsp->ulpIoTag,
1424                                                 irsp->ulpContext);
1425                         }
1426                         break;
1427                 }
1428
1429                 /*
1430                  * The response IOCB has been processed.  Update the ring
1431                  * pointer in SLIM.  If the port response put pointer has not
1432                  * been updated, sync the pgp->rspPutInx and fetch the new port
1433                  * response put pointer.
1434                  */
1435                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1436
1437                 if (pring->rspidx == portRspPut)
1438                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1439         }
1440
1441         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1442                 pring->stats.iocb_rsp_full++;
1443                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1444                 writel(status, phba->CAregaddr);
1445                 readl(phba->CAregaddr);
1446         }
1447         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1448                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1449                 pring->stats.iocb_cmd_empty++;
1450
1451                 /* Force update of the local copy of cmdGetInx */
1452                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1453                 lpfc_sli_resume_iocb(phba, pring);
1454
1455                 if ((pring->lpfc_sli_cmd_available))
1456                         (pring->lpfc_sli_cmd_available) (phba, pring);
1457
1458         }
1459
1460         spin_unlock_irqrestore(&phba->hbalock, iflag);
1461         return rc;
1462 }
1463
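/*
 * Service response completions on a slow-path ring (typically the ELS
 * ring).  Each response entry is copied into a driver iocbq and chained
 * on iocb_continueq until an entry with ulpLe set arrives; the assembled
 * completion is then dispatched to the solicited, unsolicited or abort
 * handler and the iocbqs are released back to the pool.
 */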
1464 int
1465 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1466                                 struct lpfc_sli_ring *pring, uint32_t mask)
1467 {
1468         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1469                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1470                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1471         IOCB_t *entry;
1472         IOCB_t *irsp = NULL;
1473         struct lpfc_iocbq *rspiocbp = NULL;
1474         struct lpfc_iocbq *next_iocb;
1475         struct lpfc_iocbq *cmdiocbp;
1476         struct lpfc_iocbq *saveq;
1477         uint8_t iocb_cmd_type;
1478         lpfc_iocb_type type;
1479         uint32_t status, free_saveq;
1480         uint32_t portRspPut, portRspMax;
1481         int rc = 1;
1482         unsigned long iflag;
1483
1484         spin_lock_irqsave(&phba->hbalock, iflag);
1485         pring->stats.iocb_event++;
1486
1487         /*
1488          * The next available response entry should never exceed the maximum
1489          * entries.  If it does, treat it as an adapter hardware error.
1490          */
1491         portRspMax = pring->numRiocb;
1492         portRspPut = le32_to_cpu(pgp->rspPutInx);
1493         if (portRspPut >= portRspMax) {
1494                 /*
1495                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1496                  * rsp ring <portRspMax>
1497                  */
1498                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1499                                 "%d:0303 Ring %d handler: portRspPut %d "
1500                                 "is bigger than rsp ring %d\n",
1501                                 phba->brd_no, pring->ringno, portRspPut,
1502                                 portRspMax);
1503
1504                 phba->link_state = LPFC_HBA_ERROR;
1505                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1506
1507                 phba->work_hs = HS_FFER3;
1508                 lpfc_handle_eratt(phba);
1509
1510                 return 1;
1511         }
1512
1513         rmb();
1514         while (pring->rspidx != portRspPut) {
1515                 /*
1516                  * Build a completion list and call the appropriate handler.
1517                  * The process is to get the next available response iocb, get
1518                  * a free iocb from the list, copy the response data into the
1519                  * free iocb, insert it into the continuation list, and update the
1520                  * next response index to slim.  This process makes response
1521                  * iocb's in the ring available to DMA as fast as possible but
1522                  * pays a penalty for a copy operation.  Since the iocb is
1523                  * only 32 bytes, this penalty is considered small relative to
1524                  * the PCI reads for register values and a slim write.  When
1525                  * the ulpLe field is set, the entire Command has been
1526                  * received.
1527                  */
1528                 entry = lpfc_resp_iocb(phba, pring);
1529
1530                 phba->last_completion_time = jiffies;
1531                 rspiocbp = __lpfc_sli_get_iocbq(phba);
1532                 if (rspiocbp == NULL) {
1533                         printk(KERN_ERR "%s: out of buffers! Failing "
1534                                "completion.\n", __FUNCTION__);
1535                         break;
1536                 }
1537
1538                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1539                                       phba->iocb_rsp_size);
1540                 irsp = &rspiocbp->iocb;
1541
1542                 if (++pring->rspidx >= portRspMax)
1543                         pring->rspidx = 0;
1544
1545                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1546
1547                 if (list_empty(&(pring->iocb_continueq))) {
1548                         list_add(&rspiocbp->list, &(pring->iocb_continueq));
1549                 } else {
1550                         list_add_tail(&rspiocbp->list,
1551                                       &(pring->iocb_continueq));
1552                 }
1553
1554                 pring->iocb_continueq_cnt++;
1555                 if (irsp->ulpLe) {
1556                         /*
1557                          * By default, the driver expects to free all resources
1558                          * associated with this iocb completion.
1559                          */
1560                         free_saveq = 1;
1561                         saveq = list_get_first(&pring->iocb_continueq,
1562                                                struct lpfc_iocbq, list);
1563                         irsp = &(saveq->iocb);
1564                         list_del_init(&pring->iocb_continueq);
1565                         pring->iocb_continueq_cnt = 0;
1566
1567                         pring->stats.iocb_rsp++;
1568
1569                         /*
1570                          * If resource errors reported from HBA, reduce
1571                          * queuedepths of the SCSI device.
1572                          */
1573                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1574                              (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1575                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1576                                 lpfc_adjust_queue_depth(phba);
1577                                 spin_lock_irqsave(&phba->hbalock, iflag);
1578                         }
1579
1580                         if (irsp->ulpStatus) {
1581                                 /* Rsp ring <ringno> error: IOCB */
1582                                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1583                                                 "%d:0328 Rsp Ring %d error: "
1584                                                 "IOCB Data: "
1585                                                 "x%x x%x x%x x%x "
1586                                                 "x%x x%x x%x x%x "
1587                                                 "x%x x%x x%x x%x "
1588                                                 "x%x x%x x%x x%x\n",
1589                                                 phba->brd_no,
1590                                                 pring->ringno,
1591                                                 irsp->un.ulpWord[0],
1592                                                 irsp->un.ulpWord[1],
1593                                                 irsp->un.ulpWord[2],
1594                                                 irsp->un.ulpWord[3],
1595                                                 irsp->un.ulpWord[4],
1596                                                 irsp->un.ulpWord[5],
1597                                                 *(((uint32_t *) irsp) + 6),
1598                                                 *(((uint32_t *) irsp) + 7),
1599                                                 *(((uint32_t *) irsp) + 8),
1600                                                 *(((uint32_t *) irsp) + 9),
1601                                                 *(((uint32_t *) irsp) + 10),
1602                                                 *(((uint32_t *) irsp) + 11),
1603                                                 *(((uint32_t *) irsp) + 12),
1604                                                 *(((uint32_t *) irsp) + 13),
1605                                                 *(((uint32_t *) irsp) + 14),
1606                                                 *(((uint32_t *) irsp) + 15));
1607                         }
1608
1609                         /*
1610                          * Fetch the IOCB command type and call the correct
1611                          * completion routine.  Solicited and Unsolicited
1612                          * IOCBs on the ELS ring get freed back to the
1613                          * lpfc_iocb_list by the discovery kernel thread.
1614                          */
1615                         iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1616                         type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1617                         if (type == LPFC_SOL_IOCB) {
1618                                 spin_unlock_irqrestore(&phba->hbalock,
1619                                                        iflag);
1620                                 rc = lpfc_sli_process_sol_iocb(phba, pring,
1621                                                                saveq);
1622                                 spin_lock_irqsave(&phba->hbalock, iflag);
1623                         } else if (type == LPFC_UNSOL_IOCB) {
1624                                 spin_unlock_irqrestore(&phba->hbalock,
1625                                                        iflag);
1626                                 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1627                                                                  saveq);
1628                                 spin_lock_irqsave(&phba->hbalock, iflag);
1629                         } else if (type == LPFC_ABORT_IOCB) {
1630                                 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1631                                     ((cmdiocbp =
1632                                       lpfc_sli_iocbq_lookup(phba, pring,
1633                                                             saveq)))) {
1634                                         /* Call the specified completion
1635                                            routine */
1636                                         if (cmdiocbp->iocb_cmpl) {
1637                                                 spin_unlock_irqrestore(
1638                                                        &phba->hbalock,
1639                                                        iflag);
1640                                                 (cmdiocbp->iocb_cmpl) (phba,
1641                                                              cmdiocbp, saveq);
1642                                                 spin_lock_irqsave(
1643                                                           &phba->hbalock,
1644                                                           iflag);
1645                                         } else
1646                                                 __lpfc_sli_release_iocbq(phba,
1647                                                                       cmdiocbp);
1648                                 }
1649                         } else if (type == LPFC_UNKNOWN_IOCB) {
1650                                 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1651
1652                                         char adaptermsg[LPFC_MAX_ADPTMSG];
1653
1654                                         memset(adaptermsg, 0,
1655                                                LPFC_MAX_ADPTMSG);
1656                                         memcpy(&adaptermsg[0], (uint8_t *) irsp,
1657                                                MAX_MSG_DATA);
1658                                         dev_warn(&((phba->pcidev)->dev),
1659                                                  "lpfc%d: %s",
1660                                                  phba->brd_no, adaptermsg);
1661                                 } else {
1662                                         /* Unknown IOCB command */
1663                                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1664                                                         "%d:0335 Unknown IOCB "
1665                                                         "command Data: x%x "
1666                                                         "x%x x%x x%x\n",
1667                                                         phba->brd_no,
1668                                                         irsp->ulpCommand,
1669                                                         irsp->ulpStatus,
1670                                                         irsp->ulpIoTag,
1671                                                         irsp->ulpContext);
1672                                 }
1673                         }
1674
1675                         if (free_saveq) {
1676                                 list_for_each_entry_safe(rspiocbp, next_iocb,
1677                                                          &saveq->list, list) {
1678                                         list_del(&rspiocbp->list);
1679                                         __lpfc_sli_release_iocbq(phba,
1680                                                                  rspiocbp);
1681                                 }
1682                                 __lpfc_sli_release_iocbq(phba, saveq);
1683                         }
1684                         rspiocbp = NULL;
1685                 }
1686
1687                 /*
1688                  * If the port response put pointer has not been updated, sync
1689          * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1690                  * response put pointer.
1691                  */
1692                 if (pring->rspidx == portRspPut) {
1693                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1694                 }
1695         } /* while (pring->rspidx != portRspPut) */
1696
1697         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1698                 /* At least one response entry has been freed */
1699                 pring->stats.iocb_rsp_full++;
1700                 /* SET RxRE_RSP in Chip Att register */
1701                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1702                 writel(status, phba->CAregaddr);
1703                 readl(phba->CAregaddr); /* flush */
1704         }
1705         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1706                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1707                 pring->stats.iocb_cmd_empty++;
1708
1709                 /* Force update of the local copy of cmdGetInx */
1710                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1711                 lpfc_sli_resume_iocb(phba, pring);
1712
1713                 if ((pring->lpfc_sli_cmd_available))
1714                         (pring->lpfc_sli_cmd_available) (phba, pring);
1715
1716         }
1717
1718         spin_unlock_irqrestore(&phba->hbalock, iflag);
1719         return rc;
1720 }
1721
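/*
 * Fail every iocb queued on the given ring: entries still on the txq are
 * failed back to their completion handlers with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_ABORTED (or simply released if they have no handler), and an
 * abort (ABTS) is issued for each command outstanding on the txcmplq.
 * For the ELS ring, fabric iocbs are aborted first.
 */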
1722 void
1723 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1724 {
1725         LIST_HEAD(completions);
1726         struct lpfc_iocbq *iocb, *next_iocb;
1727         IOCB_t *cmd = NULL;
1728
1729         if (pring->ringno == LPFC_ELS_RING) {
1730                 lpfc_fabric_abort_hba(phba);
1731         }
1732
1733         /* Error everything on txq and txcmplq
1734          * First do the txq.
1735          */
1736         spin_lock_irq(&phba->hbalock);
1737         list_splice_init(&pring->txq, &completions);
1738         pring->txq_cnt = 0;
1739
1740         /* Next issue ABTS for everything on the txcmplq */
1741         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1742                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1743
1744         spin_unlock_irq(&phba->hbalock);
1745
1746         while (!list_empty(&completions)) {
1747                 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1748                 cmd = &iocb->iocb;
1749                 list_del_init(&iocb->list);
1750
1751                 if (!iocb->iocb_cmpl)
1752                         lpfc_sli_release_iocbq(phba, iocb);
1753                 else {
1754                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1755                         cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1756                         (iocb->iocb_cmpl) (phba, iocb, iocb);
1757                 }
1758         }
1759 }
1760
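/*
 * Wait for the HBA Host Status register to report the requested mask
 * bits.  The poll interval backs off over at most 20 iterations and the
 * board is restarted once part-way through; returns 0 when the mask is
 * set, 1 on error (HS_FFERM) or timeout.
 */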
1761 int
1762 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1763 {
1764         uint32_t status;
1765         int i = 0;
1766         int retval = 0;
1767
1768         /* Read the HBA Host Status Register */
1769         status = readl(phba->HSregaddr);
1770
1771         /*
1772          * Check status register every 100ms for 5 retries, then every
1773          * 500ms for 5, then every 2.5 sec for 5, then reset board and
1774          * every 2.5 sec for 4.
1775          * Break out of the loop if errors occurred during init.
1776          */
1777         while (((status & mask) != mask) &&
1778                !(status & HS_FFERM) &&
1779                i++ < 20) {
1780
1781                 if (i <= 5)
1782                         msleep(10);
1783                 else if (i <= 10)
1784                         msleep(500);
1785                 else
1786                         msleep(2500);
1787
1788                 if (i == 15) {
1789                                 /* Do post */
1790                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1791                         lpfc_sli_brdrestart(phba);
1792                 }
1793                 /* Read the HBA Host Status Register */
1794                 status = readl(phba->HSregaddr);
1795         }
1796
1797         /* Check to see if any errors occurred during init */
1798         if ((status & HS_FFERM) || (i >= 20)) {
1799                 phba->link_state = LPFC_HBA_ERROR;
1800                 retval = 1;
1801         }
1802
1803         return retval;
1804 }
1805
1806 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1807
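/*
 * Barrier used before resetting the HBA.  It only runs for adapters whose
 * PCI header type is 0x80 and whose JEDEC id is Helios or Thor.  Error
 * attention is temporarily masked while a KILL_BOARD mailbox word is
 * written directly to SLIM so the chip quiesces its DMA activity, after
 * which the saved HC register value is restored.
 */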
1808 void lpfc_reset_barrier(struct lpfc_hba *phba)
1809 {
1810         uint32_t __iomem *resp_buf;
1811         uint32_t __iomem *mbox_buf;
1812         volatile uint32_t mbox;
1813         uint32_t hc_copy;
1814         int  i;
1815         uint8_t hdrtype;
1816
1817         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1818         if (hdrtype != 0x80 ||
1819             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1820              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1821                 return;
1822
1823         /*
1824          * Tell the other part of the chip to suspend temporarily all
1825          * its DMA activity.
1826          */
1827         resp_buf = phba->MBslimaddr;
1828
1829         /* Disable the error attention */
1830         hc_copy = readl(phba->HCregaddr);
1831         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1832         readl(phba->HCregaddr); /* flush */
1833         phba->link_flag |= LS_IGNORE_ERATT;
1834
1835         if (readl(phba->HAregaddr) & HA_ERATT) {
1836                 /* Clear Chip error bit */
1837                 writel(HA_ERATT, phba->HAregaddr);
1838                 phba->pport->stopped = 1;
1839         }
1840
1841         mbox = 0;
1842         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1843         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1844
1845         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1846         mbox_buf = phba->MBslimaddr;
1847         writel(mbox, mbox_buf);
1848
1849         for (i = 0;
1850              readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1851                 mdelay(1);
1852
1853         if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1854                 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1855                     phba->pport->stopped)
1856                         goto restore_hc;
1857                 else
1858                         goto clear_errat;
1859         }
1860
1861         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1862         for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
1863                 mdelay(1);
1864
1865 clear_errat:
1866
1867         while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1868                 mdelay(1);
1869
1870         if (readl(phba->HAregaddr) & HA_ERATT) {
1871                 writel(HA_ERATT, phba->HAregaddr);
1872                 phba->pport->stopped = 1;
1873         }
1874
1875 restore_hc:
1876         phba->link_flag &= ~LS_IGNORE_ERATT;
1877         writel(hc_copy, phba->HCregaddr);
1878         readl(phba->HCregaddr); /* flush */
1879 }
1880
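/*
 * Kill the HBA: issue a KILL_BOARD mailbox command with error attention
 * masked, then poll for up to roughly 3 seconds for the resulting error
 * attention.  The link state is left at LPFC_HBA_ERROR; returns 0 if the
 * error attention was seen, 1 otherwise.
 */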
1881 int
1882 lpfc_sli_brdkill(struct lpfc_hba *phba)
1883 {
1884         struct lpfc_sli *psli;
1885         LPFC_MBOXQ_t *pmb;
1886         uint32_t status;
1887         uint32_t ha_copy;
1888         int retval;
1889         int i = 0;
1890
1891         psli = &phba->sli;
1892
1893         /* Kill HBA */
1894         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1895                         "%d:0329 Kill HBA Data: x%x x%x\n",
1896                         phba->brd_no, phba->pport->port_state, psli->sli_flag);
1897
1898         if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1899                                                   GFP_KERNEL)) == 0)
1900                 return 1;
1901
1902         /* Disable the error attention */
1903         spin_lock_irq(&phba->hbalock);
1904         status = readl(phba->HCregaddr);
1905         status &= ~HC_ERINT_ENA;
1906         writel(status, phba->HCregaddr);
1907         readl(phba->HCregaddr); /* flush */
1908         phba->link_flag |= LS_IGNORE_ERATT;
1909         spin_unlock_irq(&phba->hbalock);
1910
1911         lpfc_kill_board(phba, pmb);
1912         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1913         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1914
1915         if (retval != MBX_SUCCESS) {
1916                 if (retval != MBX_BUSY)
1917                         mempool_free(pmb, phba->mbox_mem_pool);
1918                 spin_lock_irq(&phba->hbalock);
1919                 phba->link_flag &= ~LS_IGNORE_ERATT;
1920                 spin_unlock_irq(&phba->hbalock);
1921                 return 1;
1922         }
1923
1924         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1925
1926         mempool_free(pmb, phba->mbox_mem_pool);
1927
1928         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1929          * attention every 100ms for 3 seconds. If we don't get ERATT after
1930          * 3 seconds we still set HBA_ERROR state because the status of the
1931          * board is now undefined.
1932          */
1933         ha_copy = readl(phba->HAregaddr);
1934
1935         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1936                 mdelay(100);
1937                 ha_copy = readl(phba->HAregaddr);
1938         }
1939
1940         del_timer_sync(&psli->mbox_tmo);
1941         if (ha_copy & HA_ERATT) {
1942                 writel(HA_ERATT, phba->HAregaddr);
1943                 phba->pport->stopped = 1;
1944         }
1945         spin_lock_irq(&phba->hbalock);
1946         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1947         phba->link_flag &= ~LS_IGNORE_ERATT;
1948         spin_unlock_irq(&phba->hbalock);
1949
1950         psli->mbox_active = NULL;
1951         lpfc_hba_down_post(phba);
1952         phba->link_state = LPFC_HBA_ERROR;
1953
1954         return ha_copy & HA_ERATT ? 0 : 1;
1955 }
1956
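/*
 * Warm-reset the HBA: parity and SERR reporting are disabled around a
 * toggle of the INITFF bit in the Host Control register, the per-ring
 * SLI indices are cleared and the link state is set to LPFC_WARM_START.
 */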
1957 int
1958 lpfc_sli_brdreset(struct lpfc_hba *phba)
1959 {
1960         struct lpfc_sli *psli;
1961         struct lpfc_sli_ring *pring;
1962         uint16_t cfg_value;
1963         int i;
1964
1965         psli = &phba->sli;
1966
1967         /* Reset HBA */
1968         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1969                         "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1970                         phba->pport->port_state, psli->sli_flag);
1971
1972         /* perform board reset */
1973         phba->fc_eventTag = 0;
1974         phba->pport->fc_myDID = 0;
1975         phba->pport->fc_prevDID = 0;
1976
1977         /* Turn off parity checking and serr during the physical reset */
1978         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1979         pci_write_config_word(phba->pcidev, PCI_COMMAND,
1980                               (cfg_value &
1981                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1982
1983         psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1984         /* Now toggle INITFF bit in the Host Control Register */
1985         writel(HC_INITFF, phba->HCregaddr);
1986         mdelay(1);
1987         readl(phba->HCregaddr); /* flush */
1988         writel(0, phba->HCregaddr);
1989         readl(phba->HCregaddr); /* flush */
1990
1991         /* Restore PCI cmd register */
1992         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1993
1994         /* Initialize relevant SLI info */
1995         for (i = 0; i < psli->num_rings; i++) {
1996                 pring = &psli->ring[i];
1997                 pring->flag = 0;
1998                 pring->rspidx = 0;
1999                 pring->next_cmdidx  = 0;
2000                 pring->local_getidx = 0;
2001                 pring->cmdidx = 0;
2002                 pring->missbufcnt = 0;
2003         }
2004
2005         phba->link_state = LPFC_WARM_START;
2006         return 0;
2007 }
2008
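/*
 * Restart the HBA: an MBX_RESTART mailbox word (plus a word1 flag
 * indicating whether POST may be skipped once the port has been
 * initialized) is written directly to SLIM, followed by a board reset
 * and a short delay before lpfc_hba_down_post() cleans up.
 */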
2009 int
2010 lpfc_sli_brdrestart(struct lpfc_hba *phba)
2011 {
2012         MAILBOX_t *mb;
2013         struct lpfc_sli *psli;
2014         uint16_t skip_post;
2015         volatile uint32_t word0;
2016         void __iomem *to_slim;
2017
2018         spin_lock_irq(&phba->hbalock);
2019
2020         psli = &phba->sli;
2021
2022         /* Restart HBA */
2023         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2024                         "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
2025                         phba->pport->port_state, psli->sli_flag);
2026
2027         word0 = 0;
2028         mb = (MAILBOX_t *) &word0;
2029         mb->mbxCommand = MBX_RESTART;
2030         mb->mbxHc = 1;
2031
2032         lpfc_reset_barrier(phba);
2033
2034         to_slim = phba->MBslimaddr;
2035         writel(*(uint32_t *) mb, to_slim);
2036         readl(to_slim); /* flush */
2037
2038         /* Only skip post after fc_ffinit is completed */
2039         if (phba->pport->port_state) {
2040                 skip_post = 1;
2041                 word0 = 1;      /* This is really setting up word1 */
2042         } else {
2043                 skip_post = 0;
2044                 word0 = 0;      /* This is really setting up word1 */
2045         }
2046         to_slim = phba->MBslimaddr + sizeof (uint32_t);
2047         writel(*(uint32_t *) mb, to_slim);
2048         readl(to_slim); /* flush */
2049
2050         lpfc_sli_brdreset(phba);
2051         phba->pport->stopped = 0;
2052         phba->link_state = LPFC_INIT_START;
2053
2054         spin_unlock_irq(&phba->hbalock);
2055
2056         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2057         psli->stats_start = get_seconds();
2058
2059         if (skip_post)
2060                 mdelay(100);
2061         else
2062                 mdelay(2000);
2063
2064         lpfc_hba_down_post(phba);
2065
2066         return 0;
2067 }
2068
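/*
 * Wait for the chipset to come ready (HS_FFRDY and HS_MBRDY set) after a
 * reset.  Polling backs off over at most 20 attempts with one board
 * restart part-way through; returns 0 on success, -ETIMEDOUT if the
 * adapter never becomes ready, or -EIO if it reports HS_FFERM.
 */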
2069 static int
2070 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2071 {
2072         uint32_t status, i = 0;
2073
2074         /* Read the HBA Host Status Register */
2075         status = readl(phba->HSregaddr);
2076
2077         /* Check status register to see what current state is */
2078         i = 0;
2079         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2080
2081                 /* Check every 100ms for 5 retries, then every 500ms for 5, then
2082                  * every 2.5 sec for 5, then reset board and every 2.5 sec for
2083                  * 4.
2084                  */
2085                 if (i++ >= 20) {
2086                         /* Adapter failed to init, timeout, status reg
2087                            <status> */
2088                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2089                                         "%d:0436 Adapter failed to init, "
2090                                         "timeout, status reg x%x\n",
2091                                         phba->brd_no, status);
2092                         phba->link_state = LPFC_HBA_ERROR;
2093                         return -ETIMEDOUT;
2094                 }
2095
2096                 /* Check to see if any errors occurred during init */
2097                 if (status & HS_FFERM) {
2098                         /* ERROR: During chipset initialization */
2099                         /* Adapter failed to init, chipset, status reg
2100                            <status> */
2101                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2102                                         "%d:0437 Adapter failed to init, "
2103                                         "chipset, status reg x%x\n",
2104                                         phba->brd_no,
2105                                         status);
2106                         phba->link_state = LPFC_HBA_ERROR;
2107                         return -EIO;
2108                 }
2109
2110                 if (i <= 5) {
2111                         msleep(10);
2112                 } else if (i <= 10) {
2113                         msleep(500);
2114                 } else {
2115                         msleep(2500);
2116                 }
2117
2118                 if (i == 15) {
2119                                 /* Do post */
2120                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2121                         lpfc_sli_brdrestart(phba);
2122                 }
2123                 /* Read the HBA Host Status Register */
2124                 status = readl(phba->HSregaddr);
2125         }
2126
2127         /* Check to see if any errors occurred during init */
2128         if (status & HS_FFERM) {
2129                 /* ERROR: During chipset initialization */
2130                 /* Adapter failed to init, chipset, status reg <status> */
2131                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2132                                 "%d:0438 Adapter failed to init, chipset, "
2133                                 "status reg x%x\n",
2134                                 phba->brd_no,
2135                                 status);
2136                 phba->link_state = LPFC_HBA_ERROR;
2137                 return -EIO;
2138         }
2139
2140         /* Clear all interrupt enable conditions */
2141         writel(0, phba->HCregaddr);
2142         readl(phba->HCregaddr); /* flush */
2143
2144         /* setup host attn register */
2145         writel(0xffffffff, phba->HAregaddr);
2146         readl(phba->HAregaddr); /* flush */
2147         return 0;
2148 }
2149
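/*
 * Helpers that size the host buffer queue (HBQ) area from the
 * lpfc_hbq_defs table: number of HBQs, total entries across all of
 * them, and the resulting size in bytes.
 */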
2150 static int
2151 lpfc_sli_hbq_count(void)
2152 {
2153         return ARRAY_SIZE(lpfc_hbq_defs);
2154 }
2155
2156 static int
2157 lpfc_sli_hbq_entry_count(void)
2158 {
2159         int  hbq_count = lpfc_sli_hbq_count();
2160         int  count = 0;
2161         int  i;
2162
2163         for (i = 0; i < hbq_count; ++i)
2164                 count += lpfc_hbq_defs[i]->entry_count;
2165         return count;
2166 }
2167
2168 int
2169 lpfc_sli_hbq_size(void)
2170 {
2171         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2172 }
2173
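/*
 * Configure every host buffer queue on the adapter: reset the per-HBQ
 * indices, build and poll a config-HBQ mailbox command for each queue
 * (lpfc_config_hbq), then populate the queues with buffers via
 * lpfc_sli_hbqbuf_init_hbqs().
 */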
2174 static int
2175 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2176 {
2177         int  hbq_count = lpfc_sli_hbq_count();
2178         LPFC_MBOXQ_t *pmb;
2179         MAILBOX_t *pmbox;
2180         uint32_t hbqno;
2181         uint32_t hbq_entry_index;
2182
2183                                 /* Get a Mailbox buffer to set up mailbox
2184                                  * commands for HBA initialization
2185                                  */
2186         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2187
2188         if (!pmb)
2189                 return -ENOMEM;
2190
2191         pmbox = &pmb->mb;
2192
2193         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2194         phba->link_state = LPFC_INIT_MBX_CMDS;
2195
2196         hbq_entry_index = 0;
2197         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2198                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2199                 phba->hbqs[hbqno].hbqPutIdx      = 0;
2200                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
2201                 phba->hbqs[hbqno].entry_count =
2202                         lpfc_hbq_defs[hbqno]->entry_count;
2203                 lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
2204                                 pmb);
2205                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2206
2207                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2208                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2209                            mbxStatus <status>, ring <num> */
2210
2211                         lpfc_printf_log(phba, KERN_ERR,
2212                                         LOG_SLI | LOG_VPORT,
2213                                         "%d:1805 Adapter failed to init. "
2214                                         "Data: x%x x%x x%x\n",
2215                                         phba->brd_no, pmbox->mbxCommand,
2216                                         pmbox->mbxStatus, hbqno);
2217
2218                         phba->link_state = LPFC_HBA_ERROR;
2219                         mempool_free(pmb, phba->mbox_mem_pool);
2220                         return -ENXIO;
2221                 }
2222         }
2223         phba->hbq_count = hbq_count;
2224
2225         mempool_free(pmb, phba->mbox_mem_pool);
2226
2227         /* Initially populate or replenish the HBQs */
2228         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2229                 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2230                         return -ENOMEM;
2231         }
2232         return 0;
2233 }
2234
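/*
 * Bring the port up in the requested SLI mode.  The board is restarted
 * and re-initialized (at most twice), lpfc_config_port_prep() is run,
 * and a CONFIG_PORT mailbox command is issued by polling; on success
 * max_vpi is taken from the mailbox response when the firmware sets the
 * gmv bit.
 */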
2235 static int
2236 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2237 {
2238         LPFC_MBOXQ_t *pmb;
2239         uint32_t resetcount = 0, rc = 0, done = 0;
2240
2241         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2242         if (!pmb) {
2243                 phba->link_state = LPFC_HBA_ERROR;
2244                 return -ENOMEM;
2245         }
2246
2247         phba->sli_rev = sli_mode;
2248         while (resetcount < 2 && !done) {
2249                 spin_lock_irq(&phba->hbalock);
2250                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2251                 spin_unlock_irq(&phba->hbalock);
2252                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2253                 lpfc_sli_brdrestart(phba);
2254                 msleep(2500);
2255                 rc = lpfc_sli_chipset_init(phba);
2256                 if (rc)
2257                         break;
2258
2259                 spin_lock_irq(&phba->hbalock);
2260                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2261                 spin_unlock_irq(&phba->hbalock);
2262                 resetcount++;
2263
2264                 /* Call pre CONFIG_PORT mailbox command initialization.  A
2265                  * value of 0 means the call was successful.  Any other
2266                  * nonzero value is a failure, but if ERESTART is returned,
2267                  * the driver may reset the HBA and try again.
2268                  */
2269                 rc = lpfc_config_port_prep(phba);
2270                 if (rc == -ERESTART) {
2271                         phba->link_state = LPFC_LINK_UNKNOWN;
2272                         continue;
2273                 } else if (rc) {
2274                         break;
2275                 }
2276
2277                 phba->link_state = LPFC_INIT_MBX_CMDS;
2278                 lpfc_config_port(phba, pmb);
2279                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2280                 if (rc != MBX_SUCCESS) {
2281                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2282                                 "%d:0442 Adapter failed to init, mbxCmd x%x "
2283                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2284                                 phba->brd_no, pmb->mb.mbxCommand,
2285                                 pmb->mb.mbxStatus, 0);
2286                         spin_lock_irq(&phba->hbalock);
2287                         phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2288                         spin_unlock_irq(&phba->hbalock);
2289                         rc = -ENXIO;
2290                 } else {
2291                         done = 1;
2292                         phba->max_vpi = (phba->max_vpi &&
2293                                          pmb->mb.un.varCfgPort.gmv) != 0
2294                                 ? pmb->mb.un.varCfgPort.max_vpi
2295                                 : 0;
2296                 }
2297         }
2298
2299         if (!done) {
2300                 rc = -EINVAL;
2301                 goto do_prep_failed;
2302         }
2303
2304         if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2305                 (!pmb->mb.un.varCfgPort.cMA)) {
2306                 rc = -ENXIO;
2307                 goto do_prep_failed;
2308         }
2309         return rc;
2310
2311 do_prep_failed:
2312         mempool_free(pmb, phba->mbox_mem_pool);
2313         return rc;
2314 }
2315
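/*
 * Top-level SLI bring-up: select SLI-2 or SLI-3 based on the
 * lpfc_sli_mode parameter (falling back to SLI-2 when SLI-3 is not
 * supported), size the iocbs for the selected mode, map the rings,
 * set up HBQs when enabled and finish with lpfc_config_port_post().
 */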
2316 int
2317 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2318 {
2319         uint32_t rc;
2320         int  mode = 3;
2321
2322         switch (lpfc_sli_mode) {
2323         case 2:
2324                 if (phba->cfg_npiv_enable) {
2325                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2326                                 "%d:1824 NPIV enabled: Override lpfc_sli_mode "
2327                                 "parameter (%d) to auto (0).\n",
2328                                 phba->brd_no, lpfc_sli_mode);
2329                         break;
2330                 }
2331                 mode = 2;
2332                 break;
2333         case 0:
2334         case 3:
2335                 break;
2336         default:
2337                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2338                                 "%d:1819 Unrecognized lpfc_sli_mode "
2339                                 "parameter: %d.\n",
2340                                 phba->brd_no, lpfc_sli_mode);
2341
2342                 break;
2343         }
2344
2345         rc = lpfc_do_config_port(phba, mode);
2346         if (rc && lpfc_sli_mode == 3)
2347                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2348                                 "%d:1820 Unable to select SLI-3.  "
2349                                 "Not supported by adapter.\n",
2350                                 phba->brd_no);
2351         if (rc && mode != 2)
2352                 rc = lpfc_do_config_port(phba, 2);
2353         if (rc)
2354                 goto lpfc_sli_hba_setup_error;
2355
2356         if (phba->sli_rev == 3) {
2357                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2358                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2359                 phba->sli3_options |= LPFC_SLI3_ENABLED;
2360                 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2361
2362         } else {
2363                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2364                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2365                 phba->sli3_options = 0;
2366         }
2367
2368         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2369                         "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
2370                         phba->brd_no, phba->sli_rev, phba->max_vpi);
2371         rc = lpfc_sli_ring_map(phba);
2372
2373         if (rc)
2374                 goto lpfc_sli_hba_setup_error;
2375
2376                                 /* Init HBQs */
2377
2378         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2379                 rc = lpfc_sli_hbq_setup(phba);
2380                 if (rc)
2381                         goto lpfc_sli_hba_setup_error;
2382         }
2383
2384         phba->sli.sli_flag |= LPFC_PROCESS_LA;
2385
2386         rc = lpfc_config_port_post(phba);
2387         if (rc)
2388                 goto lpfc_sli_hba_setup_error;
2389
2390         return rc;
2391
2392 lpfc_sli_hba_setup_error:
2393         phba->link_state = LPFC_HBA_ERROR;
2394         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2395                         "%d:0445 Firmware initialization failed\n",
2396                         phba->brd_no);
2397         return rc;
2398 }
2399
2400 /*! lpfc_mbox_timeout
2401  *
2402  * \pre
2403  * \post
2404  * \param ptr Pointer to the per-adapter struct lpfc_hba structure,
2405  *            cast to unsigned long for the timer callback.
2406  * \return
2407  *   void
2408  *
2409  * \b Description:
2410  *
2411  * This routine handles mailbox timeout events at timer interrupt context.
2412  */
2413 void
2414 lpfc_mbox_timeout(unsigned long ptr)
2415 {
2416         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
2417         unsigned long iflag;
2418         uint32_t tmo_posted;
2419
2420         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2421         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2422         if (!tmo_posted)
2423                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2424         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2425
2426         if (!tmo_posted) {
2427                 spin_lock_irqsave(&phba->hbalock, iflag);
2428                 if (phba->work_wait)
2429                         lpfc_worker_wake_up(phba);
2430                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2431         }
2432 }
2433
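/*
 * Worker-thread side of mailbox timeout handling: log the stuck mailbox
 * command, mark the link state unknown, abort the FCP ring so that
 * outstanding SCSI I/O fails back, and then take the HBA offline and
 * restart it to recover.
 */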
2434 void
2435 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2436 {
2437         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2438         MAILBOX_t *mb = &pmbox->mb;
2439         struct lpfc_sli *psli = &phba->sli;
2440         struct lpfc_sli_ring *pring;
2441
2442         if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2443                 return;
2444         }
2445
2446         /* Mbox cmd <mbxCommand> timeout */
2447         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2448                         "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
2449                         "x%p\n",
2450                         phba->brd_no,
2451                         mb->mbxCommand,
2452                         phba->pport->port_state,
2453                         phba->sli.sli_flag,
2454                         phba->sli.mbox_active);
2455
2456         /* Setting state unknown so lpfc_sli_abort_iocb_ring
2457          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2458          * it to fail all outstanding SCSI IO.
2459          */
2460         spin_lock_irq(&phba->pport->work_port_lock);
2461         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2462         spin_unlock_irq(&phba->pport->work_port_lock);
2463         spin_lock_irq(&phba->hbalock);
2464         phba->link_state = LPFC_LINK_UNKNOWN;
2465         phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2466         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2467         spin_unlock_irq(&phba->hbalock);
2468
2469         pring = &psli->ring[psli->fcp_ring];
2470         lpfc_sli_abort_iocb_ring(phba, pring);
2471
2472         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2473                         "%d:0316 Resetting board due to mailbox timeout\n",
2474                         phba->brd_no);
2475         /*
2476          * lpfc_offline calls lpfc_sli_hba_down which will clean up
2477          * any outstanding mailbox commands.
2478          */
2479         lpfc_offline_prep(phba);
2480         lpfc_offline(phba);
2481         lpfc_sli_brdrestart(phba);
2482         if (lpfc_online(phba) == 0)             /* Initialize the HBA */
2483                 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2484         lpfc_unblock_mgmt_io(phba);
2485         return;
2486 }
2487
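/*
 * Issue a mailbox command to the adapter.  With MBX_NOWAIT the command
 * is posted and completes asynchronously (or is queued behind an active
 * command and MBX_BUSY is returned); with MBX_POLL the caller waits for
 * completion.  The command image is copied either to the host SLIM area
 * (when SLI-2 is active) or directly to adapter SLIM before the chip is
 * notified via the CA register.
 */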
2488 int
2489 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2490 {
2491         MAILBOX_t *mb;
2492         struct lpfc_sli *psli = &phba->sli;
2493         uint32_t status, evtctr;
2494         uint32_t ha_copy;
2495         int i;
2496         unsigned long drvr_flag = 0;
2497         volatile uint32_t word0, ldata;
2498         void __iomem *to_slim;
2499
2500         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2501                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2502                 if (!pmbox->vport) {
2503                         lpfc_printf_log(phba, KERN_ERR,
2504                                         LOG_MBOX | LOG_VPORT,
2505                                         "%d:1806 Mbox x%x failed. No vport\n",
2506                                         phba->brd_no,
2507                                         pmbox->mb.mbxCommand);
2508                         dump_stack();
2509                         return MBXERR_ERROR;
2510                 }
2511         }
2512
2513
2514         /* If the PCI channel is in offline state, do not post mbox. */
2515         if (unlikely(pci_channel_offline(phba->pcidev)))
2516                 return MBX_NOT_FINISHED;
2517
2518         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2519         psli = &phba->sli;
2520
2521
2522         mb = &pmbox->mb;
2523         status = MBX_SUCCESS;
2524
2525         if (phba->link_state == LPFC_HBA_ERROR) {
2526                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2527
2528                 /* Mbox command <mbxCommand> cannot issue */
2529                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2530                 return MBX_NOT_FINISHED;
2531         }
2532
2533         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2534             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2535                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2536                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2537                 return MBX_NOT_FINISHED;
2538         }
2539
2540         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2541                 /* Polling for a mbox command when another one is already active
2542                  * is not allowed in SLI. Also, the driver must have established
2543                  * SLI2 mode to queue and process multiple mbox commands.
2544                  */
2545
2546                 if (flag & MBX_POLL) {
2547                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2548
2549                         /* Mbox command <mbxCommand> cannot issue */
2550                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2551                         return MBX_NOT_FINISHED;
2552                 }
2553
2554                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2555                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2556                         /* Mbox command <mbxCommand> cannot issue */
2557                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2558                         return MBX_NOT_FINISHED;
2559                 }
2560
2561                 /* Handle STOP IOCB processing flag. This is only meaningful
2562                  * if we are not polling for mbox completion.
2563                  */
2564                 if (flag & MBX_STOP_IOCB) {
2565                         flag &= ~MBX_STOP_IOCB;
2566                         /* Now flag each ring */
2567                         for (i = 0; i < psli->num_rings; i++) {
2568                                 /* If the ring is active, flag it */
2569                                 if (psli->ring[i].cmdringaddr) {
2570                                         psli->ring[i].flag |=
2571                                             LPFC_STOP_IOCB_MBX;
2572                                 }
2573                         }
2574                 }
2575
2576                 /* Another mailbox command is still being processed, queue this
2577                  * command to be processed later.
2578                  */
2579                 lpfc_mbox_put(phba, pmbox);
2580
2581                 /* Mbox cmd issue - BUSY */
2582                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2583                                 "%d (%d):0308 Mbox cmd issue - BUSY Data: "
2584                                 "x%x x%x x%x x%x\n",
2585                                 phba->brd_no,
2586                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2587                                 mb->mbxCommand, phba->pport->port_state,
2588                                 psli->sli_flag, flag);
2589
2590                 psli->slistat.mbox_busy++;
2591                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2592
2593                 if (pmbox->vport) {
2594                         lpfc_debugfs_disc_trc(pmbox->vport,
2595                                 LPFC_DISC_TRC_MBOX_VPORT,
2596                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
2597                                 (uint32_t)mb->mbxCommand,
2598                                 mb->un.varWords[0], mb->un.varWords[1]);
2599                 }
2600                 else {
2601                         lpfc_debugfs_disc_trc(phba->pport,
2602                                 LPFC_DISC_TRC_MBOX,
2603                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
2604                                 (uint32_t)mb->mbxCommand,
2605                                 mb->un.varWords[0], mb->un.varWords[1]);
2606                 }
2607
2608                 return MBX_BUSY;
2609         }
2610
2611         /* Handle STOP IOCB processing flag. This is only meaningful
2612          * if we are not polling for mbox completion.
2613          */
2614         if (flag & MBX_STOP_IOCB) {
2615                 flag &= ~MBX_STOP_IOCB;
2616                 if (flag == MBX_NOWAIT) {
2617                         /* Now flag each ring */
2618                         for (i = 0; i < psli->num_rings; i++) {
2619                                 /* If the ring is active, flag it */
2620                                 if (psli->ring[i].cmdringaddr) {
2621                                         psli->ring[i].flag |=
2622                                             LPFC_STOP_IOCB_MBX;
2623                                 }
2624                         }
2625                 }
2626         }
2627
2628         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2629
2630         /* If we are not polling, we MUST be in SLI2 mode */
2631         if (flag != MBX_POLL) {
2632                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2633                     (mb->mbxCommand != MBX_KILL_BOARD)) {
2634                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2635                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2636                         /* Mbox command <mbxCommand> cannot issue */
2637                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2638                         return MBX_NOT_FINISHED;
2639                 }
2640                 /* timeout active mbox command */
2641                 mod_timer(&psli->mbox_tmo, (jiffies +
2642                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2643         }
2644
2645         /* Mailbox cmd <cmd> issue */
2646         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2647                         "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2648                         "x%x\n",
2649                         phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
2650                         mb->mbxCommand, phba->pport->port_state,
2651                         psli->sli_flag, flag);
2652
2653         if (mb->mbxCommand != MBX_HEARTBEAT) {
2654                 if (pmbox->vport) {
2655                         lpfc_debugfs_disc_trc(pmbox->vport,
2656                                 LPFC_DISC_TRC_MBOX_VPORT,
2657                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2658                                 (uint32_t)mb->mbxCommand,
2659                                 mb->un.varWords[0], mb->un.varWords[1]);
2660                 }
2661                 else {
2662                         lpfc_debugfs_disc_trc(phba->pport,
2663                                 LPFC_DISC_TRC_MBOX,
2664                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
2665                                 (uint32_t)mb->mbxCommand,
2666                                 mb->un.varWords[0], mb->un.varWords[1]);
2667                 }
2668         }
2669
2670         psli->slistat.mbox_cmd++;
2671         evtctr = psli->slistat.mbox_event;
2672
2673         /* next set own bit for the adapter and copy over command word */
2674         mb->mbxOwner = OWN_CHIP;
2675
2676         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2677                 /* First copy command data to host SLIM area */
2678                 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2679         } else {
2680                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2681                         /* copy command data into host mbox for cmpl */
2682                         lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2683                                               MAILBOX_CMD_SIZE);
2684                 }
2685
2686                 /* First copy mbox command data to HBA SLIM, skip past first
2687                    word */
2688                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2689                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2690                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
2691
2692                 /* Next copy over first word, with mbxOwner set */
2693                 ldata = *((volatile uint32_t *)mb);
2694                 to_slim = phba->MBslimaddr;
2695                 writel(ldata, to_slim);
2696                 readl(to_slim); /* flush */
2697
2698                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2699                         /* switch over to host mailbox */
2700                         psli->sli_flag |= LPFC_SLI2_ACTIVE;
2701                 }
2702         }
2703
2704         wmb();
2705         /* interrupt board to do it right away */
2706         writel(CA_MBATT, phba->CAregaddr);
2707         readl(phba->CAregaddr); /* flush */
2708
2709         switch (flag) {
2710         case MBX_NOWAIT:
2711                 /* Don't wait for it to finish, just return */
2712                 psli->mbox_active = pmbox;
2713                 break;
2714
2715         case MBX_POLL:
2716                 psli->mbox_active = NULL;
2717                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2718                         /* First read mbox status word */
2719                         word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2720                         word0 = le32_to_cpu(word0);
2721                 } else {
2722                         /* First read mbox status word */
2723                         word0 = readl(phba->MBslimaddr);
2724                 }
2725
2726                 /* Read the HBA Host Attention Register */
2727                 ha_copy = readl(phba->HAregaddr);
2728
2729                 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2730                 i *= 1000; /* Convert to ms */
2731
2732                 /* Wait for command to complete */
2733                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2734                        (!(ha_copy & HA_MBATT) &&
2735                         (phba->link_state > LPFC_WARM_START))) {
2736                         if (i-- <= 0) {
2737                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2738                                 spin_unlock_irqrestore(&phba->hbalock,
2739                                                        drvr_flag);
2740                                 return MBX_NOT_FINISHED;
2741                         }
2742
2743                         /* Check if we took a mbox interrupt while we were
2744                            polling */
2745                         if (((word0 & OWN_CHIP) != OWN_CHIP)
2746                             && (evtctr != psli->slistat.mbox_event))
2747                                 break;
2748
2749                         spin_unlock_irqrestore(&phba->hbalock,
2750                                                drvr_flag);
2751
2752                         msleep(1);
2753
2754                         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2755
2756                         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2757                                 /* First copy command data */
2758                                 word0 = *((volatile uint32_t *)
2759                                                 &phba->slim2p->mbx);
2760                                 word0 = le32_to_cpu(word0);
2761                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2762                                         MAILBOX_t *slimmb;
2763                                         volatile uint32_t slimword0;
2764                                         /* Check real SLIM for any errors */
2765                                         slimword0 = readl(phba->MBslimaddr);
2766                                         slimmb = (MAILBOX_t *) & slimword0;
2767                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2768                                             && slimmb->mbxStatus) {
2769                                                 psli->sli_flag &=
2770                                                     ~LPFC_SLI2_ACTIVE;
2771                                                 word0 = slimword0;
2772                                         }
2773                                 }
2774                         } else {
2775                                 /* First copy command data */
2776                                 word0 = readl(phba->MBslimaddr);
2777                         }
2778                         /* Read the HBA Host Attention Register */
2779                         ha_copy = readl(phba->HAregaddr);
2780                 }
2781
2782                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2783                         /* copy results back to user */
2784                         lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2785                                               MAILBOX_CMD_SIZE);
2786                 } else {
2787                         /* First copy command data */
2788                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2789                                                         MAILBOX_CMD_SIZE);
2790                         if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2791                                 pmbox->context2) {
2792                                 lpfc_memcpy_from_slim((void *)pmbox->context2,
2793                                       phba->MBslimaddr + DMP_RSP_OFFSET,
2794                                                       mb->un.varDmp.word_cnt);
2795                         }
2796                 }
2797
2798                 writel(HA_MBATT, phba->HAregaddr);
2799                 readl(phba->HAregaddr); /* flush */
2800
2801                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2802                 status = mb->mbxStatus;
2803         }
2804
2805         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2806         return status;
2807 }
2808
2809 /*
2810  * Caller needs to hold lock.
2811  */
2812 static void
2813 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2814                     struct lpfc_iocbq *piocb)
2815 {
2816         /* Insert the caller's iocb in the txq tail for later processing. */
2817         list_add_tail(&piocb->list, &pring->txq);
2818         pring->txq_cnt++;
2819 }
2820
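/*
 * Return the next iocb to hand to the HBA: anything already queued on the
 * txq goes first; otherwise consume (and clear) the caller's new iocb.
 */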
2821 static struct lpfc_iocbq *
2822 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2823                    struct lpfc_iocbq **piocb)
2824 {
2825         struct lpfc_iocbq * nextiocb;
2826
2827         nextiocb = lpfc_sli_ringtx_get(phba, pring);
2828         if (!nextiocb) {
2829                 nextiocb = *piocb;
2830                 *piocb = NULL;
2831         }
2832
2833         return nextiocb;
2834 }
2835
2836 /*
2837  * Lockless version of lpfc_sli_issue_iocb; the caller is expected to hold phba->hbalock.
2838  */
2839 int
2840 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2841                     struct lpfc_iocbq *piocb, uint32_t flag)
2842 {
2843         struct lpfc_iocbq *nextiocb;
2844         IOCB_t *iocb;
2845
2846         if (piocb->iocb_cmpl && (!piocb->vport) &&
2847            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2848            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2849                 lpfc_printf_log(phba, KERN_ERR,
2850                                 LOG_SLI | LOG_VPORT,
2851                                 "%d:1807 IOCB x%x failed. No vport\n",
2852                                 phba->brd_no,
2853                                 piocb->iocb.ulpCommand);
2854                 dump_stack();
2855                 return IOCB_ERROR;
2856         }
2857
2858
2859         /* If the PCI channel is in offline state, do not post iocbs. */
2860         if (unlikely(pci_channel_offline(phba->pcidev)))
2861                 return IOCB_ERROR;
2862
2863         /*
2864          * We should never get an IOCB if we are in a < LINK_DOWN state
2865          */
2866         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2867                 return IOCB_ERROR;
2868
2869         /*
2870          * Check to see if we are blocking IOCB processing because of an
2871          * outstanding mbox command.
2872          */
2873         if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2874                 goto iocb_busy;
2875
2876         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2877                 /*
2878                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2879                  * can be issued if the link is not up.
2880                  */
2881                 switch (piocb->iocb.ulpCommand) {
2882                 case CMD_QUE_RING_BUF_CN:
2883                 case CMD_QUE_RING_BUF64_CN:
2884                         /*
2885                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2886                          * completion, iocb_cmpl MUST be 0.
2887                          */
2888                         if (piocb->iocb_cmpl)
2889                                 piocb->iocb_cmpl = NULL;
2890                         /*FALLTHROUGH*/
2891                 case CMD_CREATE_XRI_CR:
2892                 case CMD_CLOSE_XRI_CN:
2893                 case CMD_CLOSE_XRI_CX:
2894                         break;
2895                 default:
2896                         goto iocb_busy;
2897                 }
2898
2899         /*
2900          * For FCP commands, we must be in a state where we can process link
2901          * attention events.
2902          */
2903         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2904                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2905                 goto iocb_busy;
2906         }
2907
2908         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2909                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2910                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2911
2912         if (iocb)
2913                 lpfc_sli_update_ring(phba, pring);
2914         else
2915                 lpfc_sli_update_full_ring(phba, pring);
2916
2917         if (!piocb)
2918                 return IOCB_SUCCESS;
2919
2920         goto out_busy;
2921
2922  iocb_busy:
2923         pring->stats.iocb_cmd_delay++;
2924
2925  out_busy:
2926
2927         if (!(flag & SLI_IOCB_RET_IOCB)) {
2928                 __lpfc_sli_ringtx_put(phba, pring, piocb);
2929                 return IOCB_SUCCESS;
2930         }
2931
2932         return IOCB_BUSY;
2933 }
2934
2935
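/*
 * Locking wrapper around __lpfc_sli_issue_iocb: takes phba->hbalock with
 * interrupts disabled, posts the iocb (or queues it on the txq), and
 * returns the IOCB_* status from the lockless helper.
 */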
2936 int
2937 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2938                     struct lpfc_iocbq *piocb, uint32_t flag)
2939 {
2940         unsigned long iflags;
2941         int rc;
2942
2943         spin_lock_irqsave(&phba->hbalock, iflags);
2944         rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2945         spin_unlock_irqrestore(&phba->hbalock, iflags);
2946
2947         return rc;
2948 }
2949
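/*
 * Rebalance the SLI-2 command/response entries: borrow the R1/R3 extra
 * entries from the FCP ring and give them to the extra ring, then set up a
 * single ring mask (profile 0) using the configured cfg_multi_ring_rctl and
 * cfg_multi_ring_type values, with no unsolicited-event handler.
 */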
2950 static int
2951 lpfc_extra_ring_setup(struct lpfc_hba *phba)
2952 {
2953         struct lpfc_sli *psli;
2954         struct lpfc_sli_ring *pring;
2955
2956         psli = &phba->sli;
2957
2958         /* Adjust cmd/rsp ring iocb entries more evenly */
2959
2960         /* Take some away from the FCP ring */
2961         pring = &psli->ring[psli->fcp_ring];
2962         pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2963         pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2964         pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2965         pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2966
2967         /* and give them to the extra ring */
2968         pring = &psli->ring[psli->extra_ring];
2969
2970         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2971         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2972         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2973         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2974
2975         /* Setup default profile for this ring */
2976         pring->iotag_max = 4096;
2977         pring->num_mask = 1;
2978         pring->prt[0].profile = 0;      /* Mask 0 */
2979         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2980         pring->prt[0].type = phba->cfg_multi_ring_type;
2981         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2982         return 0;
2983 }
2984
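/*
 * Set per-ring iocb counts, entry sizes (SLI-2 vs SLI-3), iotag limits and
 * unsolicited-event masks for the FCP, extra and ELS/CT rings, log error
 * 0462 if the totals would not fit in the SLI-2 SLIM, and apply the extra
 * ring adjustments when cfg_multi_ring_support is 2.
 */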
2985 int
2986 lpfc_sli_setup(struct lpfc_hba *phba)
2987 {
2988         int i, totiocbsize = 0;
2989         struct lpfc_sli *psli = &phba->sli;
2990         struct lpfc_sli_ring *pring;
2991
2992         psli->num_rings = MAX_CONFIGURED_RINGS;
2993         psli->sli_flag = 0;
2994         psli->fcp_ring = LPFC_FCP_RING;
2995         psli->next_ring = LPFC_FCP_NEXT_RING;
2996         psli->extra_ring = LPFC_EXTRA_RING;
2997
2998         psli->iocbq_lookup = NULL;
2999         psli->iocbq_lookup_len = 0;
3000         psli->last_iotag = 0;
3001
3002         for (i = 0; i < psli->num_rings; i++) {
3003                 pring = &psli->ring[i];
3004                 switch (i) {
3005                 case LPFC_FCP_RING:     /* ring 0 - FCP */
3006                         /* numCiocb and numRiocb are used in config_port */
3007                         pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
3008                         pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
3009                         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3010                         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3011                         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3012                         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3013                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3014                                                         SLI3_IOCB_CMD_SIZE :
3015                                                         SLI2_IOCB_CMD_SIZE;
3016                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3017                                                         SLI3_IOCB_RSP_SIZE :
3018                                                         SLI2_IOCB_RSP_SIZE;
3019                         pring->iotag_ctr = 0;
3020                         pring->iotag_max =
3021                             (phba->cfg_hba_queue_depth * 2);
3022                         pring->fast_iotag = pring->iotag_max;
3023                         pring->num_mask = 0;
3024                         break;
3025                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
3026                         /* numCiocb and numRiocb are used in config_port */
3027                         pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
3028                         pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3029                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3030                                                         SLI3_IOCB_CMD_SIZE :
3031                                                         SLI2_IOCB_CMD_SIZE;
3032                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3033                                                         SLI3_IOCB_RSP_SIZE :
3034                                                         SLI2_IOCB_RSP_SIZE;
3035                         pring->iotag_max = phba->cfg_hba_queue_depth;
3036                         pring->num_mask = 0;
3037                         break;
3038                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
3039                         /* numCiocb and numRiocb are used in config_port */
3040                         pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
3041                         pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3042                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3043                                                         SLI3_IOCB_CMD_SIZE :
3044                                                         SLI2_IOCB_CMD_SIZE;
3045                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3046                                                         SLI3_IOCB_RSP_SIZE :
3047                                                         SLI2_IOCB_RSP_SIZE;
3048                         pring->fast_iotag = 0;
3049                         pring->iotag_ctr = 0;
3050                         pring->iotag_max = 4096;
3051                         pring->num_mask = 4;
3052                         pring->prt[0].profile = 0;      /* Mask 0 */
3053                         pring->prt[0].rctl = FC_ELS_REQ;
3054                         pring->prt[0].type = FC_ELS_DATA;
3055                         pring->prt[0].lpfc_sli_rcv_unsol_event =
3056                             lpfc_els_unsol_event;
3057                         pring->prt[1].profile = 0;      /* Mask 1 */
3058                         pring->prt[1].rctl = FC_ELS_RSP;
3059                         pring->prt[1].type = FC_ELS_DATA;
3060                         pring->prt[1].lpfc_sli_rcv_unsol_event =
3061                             lpfc_els_unsol_event;
3062                         pring->prt[2].profile = 0;      /* Mask 2 */
3063                         /* NameServer Inquiry */
3064                         pring->prt[2].rctl = FC_UNSOL_CTL;
3065                         /* NameServer */
3066                         pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3067                         pring->prt[2].lpfc_sli_rcv_unsol_event =
3068                             lpfc_ct_unsol_event;
3069                         pring->prt[3].profile = 0;      /* Mask 3 */
3070                         /* NameServer response */
3071                         pring->prt[3].rctl = FC_SOL_CTL;
3072                         /* NameServer */
3073                         pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3074                         pring->prt[3].lpfc_sli_rcv_unsol_event =
3075                             lpfc_ct_unsol_event;
3076                         break;
3077                 }
3078                 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3079                                 (pring->numRiocb * pring->sizeRiocb);
3080         }
3081         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3082                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
3083                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3084                                 "%d:0462 Too many cmd / rsp ring entries in "
3085                                 "SLI2 SLIM Data: x%x x%lx\n",
3086                                 phba->brd_no, totiocbsize,
3087                                 (unsigned long) MAX_SLIM_IOCB_SIZE);
3088         }
3089         if (phba->cfg_multi_ring_support == 2)
3090                 lpfc_extra_ring_setup(phba);
3091
3092         return 0;
3093 }
3094
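/*
 * Initialize the mailbox queues and, for every ring, the ring number,
 * get/put indices and the txq/txcmplq/iocb_continueq/postbufq list heads.
 * Runs under phba->hbalock; always returns 1.
 */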
3095 int
3096 lpfc_sli_queue_setup(struct lpfc_hba *phba)
3097 {
3098         struct lpfc_sli *psli;
3099         struct lpfc_sli_ring *pring;
3100         int i;
3101
3102         psli = &phba->sli;
3103         spin_lock_irq(&phba->hbalock);
3104         INIT_LIST_HEAD(&psli->mboxq);
3105         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3106         /* Initialize list headers for txq and txcmplq as doubly linked lists */
3107         for (i = 0; i < psli->num_rings; i++) {
3108                 pring = &psli->ring[i];
3109                 pring->ringno = i;
3110                 pring->next_cmdidx  = 0;
3111                 pring->local_getidx = 0;
3112                 pring->cmdidx = 0;
3113                 INIT_LIST_HEAD(&pring->txq);
3114                 INIT_LIST_HEAD(&pring->txcmplq);
3115                 INIT_LIST_HEAD(&pring->iocb_continueq);
3116                 INIT_LIST_HEAD(&pring->postbufq);
3117         }
3118         spin_unlock_irq(&phba->hbalock);
3119         return 1;
3120 }
3121
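/*
 * Per-vport teardown: fail every txq iocb belonging to this vport with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, and issue an abort for each of the
 * vport's iocbs still on the txcmplq.  The ELS ring is flagged
 * LPFC_DEFERRED_RING_EVENT while the lists are walked.
 */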
3122 int
3123 lpfc_sli_host_down(struct lpfc_vport *vport)
3124 {
3125         LIST_HEAD(completions);
3126         struct lpfc_hba *phba = vport->phba;
3127         struct lpfc_sli *psli = &phba->sli;
3128         struct lpfc_sli_ring *pring;
3129         struct lpfc_iocbq *iocb, *next_iocb;
3130         int i;
3131         unsigned long flags = 0;
3132         uint16_t prev_pring_flag;
3133
3134         lpfc_cleanup_discovery_resources(vport);
3135
3136         spin_lock_irqsave(&phba->hbalock, flags);
3137         for (i = 0; i < psli->num_rings; i++) {
3138                 pring = &psli->ring[i];
3139                 prev_pring_flag = pring->flag;
3140                 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3141                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
3142                 /*
3143                  * Error everything on the txq since these iocbs have not been
3144                  * given to the FW yet.
3145                  */
3146                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3147                         if (iocb->vport != vport)
3148                                 continue;
3149                         list_move_tail(&iocb->list, &completions);
3150                         pring->txq_cnt--;
3151                 }
3152
3153                 /* Next issue ABTS for everything on the txcmplq */
3154                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3155                                                                         list) {
3156                         if (iocb->vport != vport)
3157                                 continue;
3158                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3159                 }
3160
3161                 pring->flag = prev_pring_flag;
3162         }
3163
3164         spin_unlock_irqrestore(&phba->hbalock, flags);
3165
3166         while (!list_empty(&completions)) {
3167                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3168
3169                 if (!iocb->iocb_cmpl)
3170                         lpfc_sli_release_iocbq(phba, iocb);
3171                 else {
3172                         iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3173                         iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3174                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3175                 }
3176         }
3177         return 1;
3178 }
3179
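/*
 * Full HBA teardown: complete every queued txq iocb with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, cancel the mailbox timer, and return
 * the active plus any pending or completed mailbox commands with a
 * MBX_NOT_FINISHED status.
 */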
3180 int
3181 lpfc_sli_hba_down(struct lpfc_hba *phba)
3182 {
3183         LIST_HEAD(completions);
3184         struct lpfc_sli *psli = &phba->sli;
3185         struct lpfc_sli_ring *pring;
3186         LPFC_MBOXQ_t *pmb;
3187         struct lpfc_iocbq *iocb;
3188         IOCB_t *cmd = NULL;
3189         int i;
3190         unsigned long flags = 0;
3191
3192         lpfc_hba_down_prep(phba);
3193
3194         lpfc_fabric_abort_hba(phba);
3195
3196         spin_lock_irqsave(&phba->hbalock, flags);
3197         for (i = 0; i < psli->num_rings; i++) {
3198                 pring = &psli->ring[i];
3199                 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3200                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
3201
3202                 /*
3203                  * Error everything on the txq since these iocbs have not been
3204                  * given to the FW yet.
3205                  */
3206                 list_splice_init(&pring->txq, &completions);
3207                 pring->txq_cnt = 0;
3208
3209         }
3210         spin_unlock_irqrestore(&phba->hbalock, flags);
3211
3212         while (!list_empty(&completions)) {
3213                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3214                 cmd = &iocb->iocb;
3215
3216                 if (!iocb->iocb_cmpl)
3217                         lpfc_sli_release_iocbq(phba, iocb);
3218                 else {
3219                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3220                         cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3221                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3222                 }
3223         }
3224
3225         /* Return any active mbox cmds */
3226         del_timer_sync(&psli->mbox_tmo);
3227         spin_lock_irqsave(&phba->hbalock, flags);
3228
3229         spin_lock(&phba->pport->work_port_lock);
3230         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3231         spin_unlock(&phba->pport->work_port_lock);
3232
3233         if (psli->mbox_active) {
3234                 list_add_tail(&psli->mbox_active->list, &completions);
3235                 psli->mbox_active = NULL;
3236                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3237         }
3238
3239         /* Return any pending or completed mbox cmds */
3240         list_splice_init(&phba->sli.mboxq, &completions);
3241         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3242         INIT_LIST_HEAD(&psli->mboxq);
3243         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3244
3245         spin_unlock_irqrestore(&phba->hbalock, flags);
3246
3247         while (!list_empty(&completions)) {
3248                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3249                 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3250                 if (pmb->mbox_cmpl) {
3251                         pmb->mbox_cmpl(phba, pmb);
3252                 }
3253         }
3254         return 1;
3255 }
3256
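/*
 * Copy cnt bytes a 32-bit word at a time, swapping each word between the
 * little-endian SLIM layout and host order.  The same routine serves both
 * copy directions since the swap is symmetric.
 */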
3257 void
3258 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3259 {
3260         uint32_t *src = srcp;
3261         uint32_t *dest = destp;
3262         uint32_t ldata;
3263         int i;
3264
3265         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3266                 ldata = *src;
3267                 ldata = le32_to_cpu(ldata);
3268                 *dest = ldata;
3269                 src++;
3270                 dest++;
3271         }
3272 }
3273
3274 int
3275 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3276                          struct lpfc_dmabuf *mp)
3277 {
3278         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3279            later */
3280         spin_lock_irq(&phba->hbalock);
3281         list_add_tail(&mp->list, &pring->postbufq);
3282         pring->postbufq_cnt++;
3283         spin_unlock_irq(&phba->hbalock);
3284         return 0;
3285 }
3286
3287
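/*
 * Look up (and remove) the posted buffer whose DMA address matches phys on
 * this ring's postbufq.  Returns the lpfc_dmabuf on a hit, or NULL after
 * logging message 0410 if no posted buffer maps to that address.
 */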
3288 struct lpfc_dmabuf *
3289 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3290                          dma_addr_t phys)
3291 {
3292         struct lpfc_dmabuf *mp, *next_mp;
3293         struct list_head *slp = &pring->postbufq;
3294
3295         /* Search postbufq, from the beginning, looking for a match on phys */
3296         spin_lock_irq(&phba->hbalock);
3297         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3298                 if (mp->phys == phys) {
3299                         list_del_init(&mp->list);
3300                         pring->postbufq_cnt--;
3301                         spin_unlock_irq(&phba->hbalock);
3302                         return mp;
3303                 }
3304         }
3305
3306         spin_unlock_irq(&phba->hbalock);
3307         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3308                         "%d:0410 Cannot find virtual addr for mapped buf on "
3309                         "ring %d Data x%llx x%p x%p x%x\n",
3310                         phba->brd_no, pring->ringno, (unsigned long long)phys,
3311                         slp->next, slp->prev, pring->postbufq_cnt);
3312         return NULL;
3313 }
3314
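/*
 * Completion handler for the ABORT_XRI/CLOSE_XRI iocb built by
 * lpfc_sli_issue_abort_iotag.  If the abort itself failed, try to find the
 * original ELS iocb by its iotag and, when it is still marked
 * LPFC_DRIVER_ABORTED, pull it off the txcmplq and complete it with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 */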
3315 static void
3316 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3317                         struct lpfc_iocbq *rspiocb)
3318 {
3319         IOCB_t *irsp = &rspiocb->iocb;
3320         uint16_t abort_iotag, abort_context;
3321         struct lpfc_iocbq *abort_iocb;
3322         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3323
3324         abort_iocb = NULL;
3325
3326         if (irsp->ulpStatus) {
3327                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3328                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3329
3330                 spin_lock_irq(&phba->hbalock);
3331                 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3332                         abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3333
3334                 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3335                                 "%d:0327 Cannot abort els iocb %p "
3336                                 "with tag %x context %x, abort status %x, "
3337                                 "abort code %x\n",
3338                                 phba->brd_no, abort_iocb, abort_iotag,
3339                                 abort_context, irsp->ulpStatus,
3340                                 irsp->un.ulpWord[4]);
3341
3342                 /*
3343                  * make sure we have the right iocbq before taking it
3344                  * off the txcmplq and try to call completion routine.
3345                  */
3346                 if (!abort_iocb ||
3347                     abort_iocb->iocb.ulpContext != abort_context ||
3348                     (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3349                         spin_unlock_irq(&phba->hbalock);
3350                 else {
3351                         list_del_init(&abort_iocb->list);
3352                         pring->txcmplq_cnt--;
3353                         spin_unlock_irq(&phba->hbalock);
3354
3355                         abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3356                         abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3357                         abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3358                         (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3359                 }
3360         }
3361
3362         lpfc_sli_release_iocbq(phba, cmdiocb);
3363         return;
3364 }
3365
3366 static void
3367 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3368                      struct lpfc_iocbq *rspiocb)
3369 {
3370         IOCB_t *irsp = &rspiocb->iocb;
3371
3372         /* ELS cmd tag <ulpIoTag> completes */
3373         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3374                         "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
3375                         "x%x x%x x%x\n",
3376                         phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
3377                         irsp->un.ulpWord[4], irsp->ulpTimeout);
3378         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3379                 lpfc_ct_free_iocb(phba, cmdiocb);
3380         else
3381                 lpfc_els_free_iocb(phba, cmdiocb);
3382         return;
3383 }
3384
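/*
 * Build and issue an ABTS (ABORT_XRI_CN, or CLOSE_XRI_CN when the link is
 * down) for the given command iocb.  Already-aborted iocbs and abort/close
 * commands themselves are skipped; during unload the ELS iocb is simply
 * given the lpfc_ignore_els_cmpl callback instead of being aborted.  The
 * caller is expected to hold phba->hbalock, since the lockless
 * __lpfc_sli_get_iocbq/__lpfc_sli_issue_iocb helpers are used.
 */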
3385 int
3386 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3387                            struct lpfc_iocbq *cmdiocb)
3388 {
3389         struct lpfc_vport *vport = cmdiocb->vport;
3390         struct lpfc_iocbq *abtsiocbp;
3391         IOCB_t *icmd = NULL;
3392         IOCB_t *iabt = NULL;
3393         int retval = IOCB_ERROR;
3394
3395         /*
3396          * There are certain command types we don't want to abort.  And we
3397          * don't want to abort commands that are already in the process of
3398          * being aborted.
3399          */
3400         icmd = &cmdiocb->iocb;
3401         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3402             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3403             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3404                 return 0;
3405
3406         /* If we're unloading, don't abort iocb on the ELS ring, but change the
3407          * callback so that nothing happens when it finishes.
3408          */
3409         if ((vport->load_flag & FC_UNLOADING) &&
3410             (pring->ringno == LPFC_ELS_RING)) {
3411                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3412                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3413                 else
3414                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3415                 goto abort_iotag_exit;
3416         }
3417
3418         /* issue ABTS for this IOCB based on iotag */
3419         abtsiocbp = __lpfc_sli_get_iocbq(phba);
3420         if (abtsiocbp == NULL)
3421                 return 0;
3422
3423         /* Marking the iocb LPFC_DRIVER_ABORTED signals the abort completion
3424          * path to set the correct status before calling the completion handler.
3425          */
3426         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3427
3428         iabt = &abtsiocbp->iocb;
3429         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3430         iabt->un.acxri.abortContextTag = icmd->ulpContext;
3431         iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3432         iabt->ulpLe = 1;
3433         iabt->ulpClass = icmd->ulpClass;
3434
3435         if (phba->link_state >= LPFC_LINK_UP)
3436                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
3437         else
3438                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
3439
3440         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3441
3442         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3443                         "%d (%d):0339 Abort xri x%x, original iotag x%x, "
3444                         "abort cmd iotag x%x\n",
3445                         phba->brd_no, vport->vpi,
3446                         iabt->un.acxri.abortContextTag,
3447                         iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3448         retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3449
3450 abort_iotag_exit:
3451         /*
3452          * Caller to this routine should check for IOCB_ERROR
3453          * and handle it properly.  This routine no longer removes
3454          * iocb off txcmplq and call compl in case of IOCB_ERROR.
3455          */
3456         return retval;
3457 }
3458
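/*
 * Return 0 if this iocb is an FCP command that matches the given context
 * (LUN, target, ulpContext or whole host), non-zero otherwise.  Used by the
 * sum/abort walkers below to filter the iocbq_lookup table.
 */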
3459 static int
3460 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
3461                            uint64_t lun_id, uint32_t ctx,
3462                            lpfc_ctx_cmd ctx_cmd)
3463 {
3464         struct lpfc_scsi_buf *lpfc_cmd;
3465         struct scsi_cmnd *cmnd;
3466         int rc = 1;
3467
3468         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
3469                 return rc;
3470
3471         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3472         cmnd = lpfc_cmd->pCmd;
3473
3474         if (cmnd == NULL)
3475                 return rc;
3476
3477         switch (ctx_cmd) {
3478         case LPFC_CTX_LUN:
3479                 if ((cmnd->device->id == tgt_id) &&
3480                     (cmnd->device->lun == lun_id))
3481                         rc = 0;
3482                 break;
3483         case LPFC_CTX_TGT:
3484                 if (cmnd->device->id == tgt_id)
3485                         rc = 0;
3486                 break;
3487         case LPFC_CTX_CTX:
3488                 if (iocbq->iocb.ulpContext == ctx)
3489                         rc = 0;
3490                 break;
3491         case LPFC_CTX_HOST:
3492                 rc = 0;
3493                 break;
3494         default:
3495                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3496                         __FUNCTION__, ctx_cmd);
3497                 break;
3498         }
3499
3500         return rc;
3501 }
3502
3503 int
3504 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3505                   uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
3506 {
3507         struct lpfc_iocbq *iocbq;
3508         int sum, i;
3509
3510         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3511                 iocbq = phba->sli.iocbq_lookup[i];
3512
3513                 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
3514                                                 0, ctx_cmd) == 0)
3515                         sum++;
3516         }
3517
3518         return sum;
3519 }
3520
3521 void
3522 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3523                         struct lpfc_iocbq *rspiocb)
3524 {
3525         lpfc_sli_release_iocbq(phba, cmdiocb);
3526         return;
3527 }
3528
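/*
 * Walk the iotag lookup table and issue an ABTS for every outstanding FCP
 * iocb that matches the LUN/target/host context.  Returns the number of
 * iocbs for which the abort could not be issued.
 */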
3529 int
3530 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3531                     uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
3532                     lpfc_ctx_cmd abort_cmd)
3533 {
3534         struct lpfc_iocbq *iocbq;
3535         struct lpfc_iocbq *abtsiocb;
3536         IOCB_t *cmd = NULL;
3537         int errcnt = 0, ret_val = 0;
3538         int i;
3539
3540         for (i = 1; i <= phba->sli.last_iotag; i++) {
3541                 iocbq = phba->sli.iocbq_lookup[i];
3542
3543                 if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
3544                                                abort_cmd) != 0)
3545                         continue;
3546
3547                 /* issue ABTS for this IOCB based on iotag */
3548                 abtsiocb = lpfc_sli_get_iocbq(phba);
3549                 if (abtsiocb == NULL) {
3550                         errcnt++;
3551                         continue;
3552                 }
3553
3554                 cmd = &iocbq->iocb;
3555                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3556                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3557                 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3558                 abtsiocb->iocb.ulpLe = 1;
3559                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3560                 abtsiocb->vport = phba->pport;
3561
3562                 if (lpfc_is_link_up(phba))
3563                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3564                 else
3565                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3566
3567                 /* Setup callback routine and issue the command. */
3568                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3569                 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3570                 if (ret_val == IOCB_ERROR) {
3571                         lpfc_sli_release_iocbq(phba, abtsiocb);
3572                         errcnt++;
3573                         continue;
3574                 }
3575         }
3576
3577         return errcnt;
3578 }
3579
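/*
 * iocb_cmpl handler used by lpfc_sli_issue_iocb_wait: copy the response
 * iocb into the caller-supplied buffer (context2), set LPFC_IO_WAKE and
 * wake up whoever is sleeping on the wait queue stored in context_un.
 */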
3580 static void
3581 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3582                         struct lpfc_iocbq *cmdiocbq,
3583                         struct lpfc_iocbq *rspiocbq)
3584 {
3585         wait_queue_head_t *pdone_q;
3586         unsigned long iflags;
3587
3588         spin_lock_irqsave(&phba->hbalock, iflags);
3589         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3590         if (cmdiocbq->context2 && rspiocbq)
3591                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3592                        &rspiocbq->iocb, sizeof(IOCB_t));
3593
3594         pdone_q = cmdiocbq->context_un.wait_queue;
3595         if (pdone_q)
3596                 wake_up(pdone_q);
3597         spin_unlock_irqrestore(&phba->hbalock, iflags);
3598         return;
3599 }
3600
3601 /*
3602  * Issue the caller's iocb and wait for its completion, but no longer than the
3603  * caller's timeout.  Note that the LPFC_IO_WAKE bit in iocb_flag is cleared
3604  * before the lpfc_sli_issue_iocb call since the wake routine sets it on
3605  * completion and, by definition, this is a wait function.
3606  */
3607
3608 int
3609 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3610                          struct lpfc_sli_ring *pring,
3611                          struct lpfc_iocbq *piocb,
3612                          struct lpfc_iocbq *prspiocbq,
3613                          uint32_t timeout)
3614 {
3615         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3616         long timeleft, timeout_req = 0;
3617         int retval = IOCB_SUCCESS;
3618         uint32_t creg_val;
3619
3620         /*
3621          * If the caller has provided a response iocbq buffer, then context2
3622          * must be NULL, otherwise it is an error.
3623          */
3624         if (prspiocbq) {
3625                 if (piocb->context2)
3626                         return IOCB_ERROR;
3627                 piocb->context2 = prspiocbq;
3628         }
3629
3630         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3631         piocb->context_un.wait_queue = &done_q;
3632         piocb->iocb_flag &= ~LPFC_IO_WAKE;
3633
3634         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3635                 creg_val = readl(phba->HCregaddr);
3636                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3637                 writel(creg_val, phba->HCregaddr);
3638                 readl(phba->HCregaddr); /* flush */
3639         }
3640
3641         retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3642         if (retval == IOCB_SUCCESS) {
3643                 timeout_req = timeout * HZ;
3644                 timeleft = wait_event_timeout(done_q,
3645                                 piocb->iocb_flag & LPFC_IO_WAKE,
3646                                 timeout_req);
3647
3648                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3649                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3650                                         "%d:0331 IOCB wake signaled\n",
3651                                         phba->brd_no);
3652                 } else if (timeleft == 0) {
3653                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3654                                         "%d:0338 IOCB wait timeout error - no "
3655                                         "wake response Data x%x\n",
3656                                         phba->brd_no, timeout);
3657                         retval = IOCB_TIMEDOUT;
3658                 } else {
3659                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3660                                         "%d:0330 IOCB wake NOT set, "
3661                                         "Data x%x x%lx\n", phba->brd_no,
3662                                         timeout, (timeleft / jiffies));
3663                         retval = IOCB_TIMEDOUT;
3664                 }
3665         } else {
3666                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3667                                 "%d:0332 IOCB wait issue failed, Data x%x\n",
3668                                 phba->brd_no, retval);
3669                 retval = IOCB_ERROR;
3670         }
3671
3672         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3673                 creg_val = readl(phba->HCregaddr);
3674                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3675                 writel(creg_val, phba->HCregaddr);
3676                 readl(phba->HCregaddr); /* flush */
3677         }
3678
3679         if (prspiocbq)
3680                 piocb->context2 = NULL;
3681
3682         piocb->context_un.wait_queue = NULL;
3683         piocb->iocb_cmpl = NULL;
3684         return retval;
3685 }
3686
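/*
 * Synchronous mailbox wrapper: issue pmboxq with MBX_NOWAIT and sleep on a
 * local wait queue until lpfc_sli_wake_mbox_wait sets LPFC_MBX_WAKE or the
 * timeout (in seconds) expires.  On MBX_TIMEOUT the completion handler is
 * switched to lpfc_sli_def_mbox_cmpl so a late completion can still be
 * cleaned up.  The caller must pass context1 == NULL.
 *
 * A minimal, hypothetical caller sketch (assuming pmb was allocated from
 * phba->mbox_mem_pool and prepared by one of the mailbox setup helpers):
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */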
3687 int
3688 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3689                          uint32_t timeout)
3690 {
3691         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3692         int retval;
3693         unsigned long flag;
3694
3695         /* The caller must leave context1 empty. */
3696         if (pmboxq->context1 != 0)
3697                 return MBX_NOT_FINISHED;
3698
3699         /* setup wake call as IOCB callback */
3700         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3701         /* setup context field to pass wait_queue pointer to wake function  */
3702         pmboxq->context1 = &done_q;
3703
3704         /* now issue the command */
3705         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3706
3707         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3708                 wait_event_interruptible_timeout(done_q,
3709                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3710                                 timeout * HZ);
3711
3712                 spin_lock_irqsave(&phba->hbalock, flag);
3713                 pmboxq->context1 = NULL;
3714                 /*
3715                  * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
3716                  * otherwise do not free the resources.
3717                  */
3718                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3719                         retval = MBX_SUCCESS;
3720                 else {
3721                         retval = MBX_TIMEOUT;
3722                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3723                 }
3724                 spin_unlock_irqrestore(&phba->hbalock, flag);
3725         }
3726
3727         return retval;
3728 }
3729
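/*
 * Busy-wait (1 ms at a time, up to LPFC_MBOX_TMO seconds) for the active
 * mailbox command to finish, servicing any HA_MBATT attention that arrives
 * in the meantime via lpfc_sli_handle_mb_event.  Returns 1 if a mailbox is
 * still active when the wait gives up, 0 otherwise.
 */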
3730 int
3731 lpfc_sli_flush_mbox_queue(struct lpfc_hba *phba)
3732 {
3733         struct lpfc_vport *vport = phba->pport;
3734         int i = 0;
3735         uint32_t ha_copy;
3736
3737         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3738                 if (i++ > LPFC_MBOX_TMO * 1000)
3739                         return 1;
3740
3741                 /*
3742                  * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3743                  * did finish. This way we won't get the misleading
3744                  * "Stray Mailbox Interrupt" message.
3745                  */
3746                 spin_lock_irq(&phba->hbalock);
3747                 ha_copy = phba->work_ha;
3748                 phba->work_ha &= ~HA_MBATT;
3749                 spin_unlock_irq(&phba->hbalock);
3750
3751                 if (ha_copy & HA_MBATT)
3752                         if (lpfc_sli_handle_mb_event(phba) == 0)
3753                                 i = 0;
3754
3755                 msleep(1);
3756         }
3757
3758         return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3759 }
3760
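/*
 * HBA interrupt handler.  Reads and clears the Host Attention register
 * (leaving link and error attention latched), handles mailbox attention
 * inline, masks link attention until CLEAR_LA is done, records error
 * attention status, and defers the remaining slow-path work to the worker
 * thread via phba->work_ha.  FCP ring events (and extra ring events when
 * cfg_multi_ring_support is 2) are serviced directly with
 * lpfc_sli_handle_fast_ring_event.
 */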
3761 irqreturn_t
3762 lpfc_intr_handler(int irq, void *dev_id)
3763 {
3764         struct lpfc_hba  *phba;
3765         uint32_t ha_copy;
3766         uint32_t work_ha_copy;
3767         unsigned long status;
3768         int i;
3769         uint32_t control;
3770
3771         MAILBOX_t *mbox, *pmbox;
3772         struct lpfc_vport *vport;
3773         struct lpfc_nodelist *ndlp;
3774         struct lpfc_dmabuf *mp;
3775         LPFC_MBOXQ_t *pmb;
3776         int rc;
3777
3778         /*
3779          * Get the driver's phba structure from the dev_id and
3780          * assume the HBA is not interrupting.
3781          */
3782         phba = (struct lpfc_hba *) dev_id;
3783
3784         if (unlikely(!phba))
3785                 return IRQ_NONE;
3786
3787         /* If the pci channel is offline, ignore all the interrupts. */
3788         if (unlikely(pci_channel_offline(phba->pcidev)))
3789                 return IRQ_NONE;
3790
3791         phba->sli.slistat.sli_intr++;
3792
3793         /*
3794          * Call the HBA to see if it is interrupting.  If not, don't claim
3795          * the interrupt
3796          */
3797
3798         /* Ignore all interrupts during initialization. */
3799         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3800                 return IRQ_NONE;
3801
3802         /*
3803          * Read host attention register to determine interrupt source
3804          * Clear Attention Sources, except Error Attention (to
3805          * preserve status) and Link Attention
3806          */
3807         spin_lock(&phba->hbalock);
3808         ha_copy = readl(phba->HAregaddr);
3809         /* If somebody is waiting to handle an eratt don't process it
3810          * here.  The brdkill function will do this.
3811          */
3812         if (phba->link_flag & LS_IGNORE_ERATT)
3813                 ha_copy &= ~HA_ERATT;
3814         writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3815         readl(phba->HAregaddr); /* flush */
3816         spin_unlock(&phba->hbalock);
3817
3818         if (unlikely(!ha_copy))
3819                 return IRQ_NONE;
3820
3821         work_ha_copy = ha_copy & phba->work_ha_mask;
3822
3823         if (unlikely(work_ha_copy)) {
3824                 if (work_ha_copy & HA_LATT) {
3825                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3826                                 /*
3827                                  * Turn off Link Attention interrupts
3828                                  * until CLEAR_LA done
3829                                  */
3830                                 spin_lock(&phba->hbalock);
3831                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3832                                 control = readl(phba->HCregaddr);
3833                                 control &= ~HC_LAINT_ENA;
3834                                 writel(control, phba->HCregaddr);
3835                                 readl(phba->HCregaddr); /* flush */
3836                                 spin_unlock(&phba->hbalock);
3837                         }
3838                         else
3839                                 work_ha_copy &= ~HA_LATT;
3840                 }
3841
3842                 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3843                         /*
3844                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3845                          * the only slow ring.
3846                          */
3847                         status = (work_ha_copy &
3848                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
3849                         status >>= (4*LPFC_ELS_RING);
3850                         if (status & HA_RXMASK) {
3851                                 spin_lock(&phba->hbalock);
3852                                 control = readl(phba->HCregaddr);
3853                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
3854                                         control &=
3855                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
3856                                         writel(control, phba->HCregaddr);
3857                                         readl(phba->HCregaddr); /* flush */
3858                                 }
3859                                 spin_unlock(&phba->hbalock);
3860                         }
3861                 }
3862
3863                 if (work_ha_copy & HA_ERATT) {
3864                         phba->link_state = LPFC_HBA_ERROR;
3865                         /*
3866                          * There was a link/board error.  Read the
3867                          * status register to retrieve the error event
3868                          * and process it.
3869                          */
3870                         phba->sli.slistat.err_attn_event++;
3871                         /* Save status info */
3872                         phba->work_hs = readl(phba->HSregaddr);
3873                         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3874                         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3875
3876                         /* Clear Chip error bit */
3877                         writel(HA_ERATT, phba->HAregaddr);
3878                         readl(phba->HAregaddr); /* flush */
3879                         phba->pport->stopped = 1;
3880                 }
3881
3882                 if ((work_ha_copy & HA_MBATT) &&
3883                     (phba->sli.mbox_active)) {
3884                         pmb = phba->sli.mbox_active;
3885                         pmbox = &pmb->mb;
3886                         mbox = &phba->slim2p->mbx;
3887                         vport = pmb->vport;
3888
3889                         /* First check out the status word */
3890                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3891                         if (pmbox->mbxOwner != OWN_HOST) {
3892                                 /*
3893                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
3894                                  * mbxStatus <status>
3895                                  */
3896                                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3897                                                 LOG_SLI,
3898                                                 "%d (%d):0304 Stray Mailbox "
3899                                                 "Interrupt mbxCommand x%x "
3900                                                 "mbxStatus x%x\n",
3901                                                 phba->brd_no,
3902                                                 (vport
3903                                                  ? vport->vpi : 0),
3904                                                 pmbox->mbxCommand,
3905                                                 pmbox->mbxStatus);
3906                         }
3907                         phba->last_completion_time = jiffies;
3908                         del_timer_sync(&phba->sli.mbox_tmo);
3909
3910                         phba->sli.mbox_active = NULL;
3911                         if (pmb->mbox_cmpl) {
3912                                 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3913                                                       MAILBOX_CMD_SIZE);
3914                         }
3915                         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3916                                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3917
3918                                 lpfc_debugfs_disc_trc(vport,
3919                                         LPFC_DISC_TRC_MBOX_VPORT,
3920                                         "MBOX dflt rpi: : status:x%x rpi:x%x",
3921                                         (uint32_t)pmbox->mbxStatus,
3922                                         pmbox->un.varWords[0], 0);
3923
3924                                 if (!pmbox->mbxStatus) {
3925                                         mp = (struct lpfc_dmabuf *)
3926                                                 (pmb->context1);
3927                                         ndlp = (struct lpfc_nodelist *)
3928                                                 pmb->context2;
3929
3930                                         /* Reg_LOGIN of dflt RPI was successful.
3931                                          * Now let's get rid of the RPI using the
3932                                          * same mbox buffer.
3933                                          */
3934                                         lpfc_unreg_login(phba, vport->vpi,
3935                                                 pmbox->un.varWords[0], pmb);
3936                                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3937                                         pmb->context1 = mp;
3938                                         pmb->context2 = ndlp;
3939                                         pmb->vport = vport;
3940                                         spin_lock(&phba->hbalock);
3941                                         phba->sli.sli_flag &=
3942                                                 ~LPFC_SLI_MBOX_ACTIVE;
3943                                         spin_unlock(&phba->hbalock);
3944                                         goto send_current_mbox;
3945                                 }
3946                         }
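                             /*
                              * Clear any pending mailbox-timeout work and queue
                              * the completed command on the mailbox completion
                              * list.
                              */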
3947                         spin_lock(&phba->pport->work_port_lock);
3948                         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3949                         spin_unlock(&phba->pport->work_port_lock);
3950                         lpfc_mbox_cmpl_put(phba, pmb);
3951                 }
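                     /*
                      * Mailbox attention with nothing outstanding: the mailbox
                      * is idle, so try to issue the next queued command from
                      * here rather than deferring it to the worker thread.
                      */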
3952                 if ((work_ha_copy & HA_MBATT) &&
3953                     (phba->sli.mbox_active == NULL)) {
3954 send_next_mbox:
3955                         spin_lock(&phba->hbalock);
3956                         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3957                         pmb = lpfc_mbox_get(phba);
3958                         spin_unlock(&phba->hbalock);
3959 send_current_mbox:
3960                         /* Process next mailbox command if there is one */
3961                         if (pmb != NULL) {
3962                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3963                                 if (rc == MBX_NOT_FINISHED) {
3964                                         pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3965                                         lpfc_mbox_cmpl_put(phba, pmb);
3966                                         goto send_next_mbox;
3967                                 }
3968                         } else {
3969                                 /* Turn on IOCB processing */
3970                                 for (i = 0; i < phba->sli.num_rings; i++)
3971                                         lpfc_sli_turn_on_ring(phba, i);
3972                         }
3973
3974                 }
3975
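                     /*
                      * Note the attention conditions that still need service
                      * and wake the worker thread to handle the slow path.
                      */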
3976                 spin_lock(&phba->hbalock);
3977                 phba->work_ha |= work_ha_copy;
3978                 if (phba->work_wait)
3979                         lpfc_worker_wake_up(phba);
3980                 spin_unlock(&phba->hbalock);
3981         }
3982
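             /*
              * Strip off the events deferred to the worker thread; only the
              * fast-path ring attention bits are handled below.
              */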
3983         ha_copy &= ~(phba->work_ha_mask);
3984
3985         /*
3986          * Process all events on FCP ring.  Take the optimized path for
3987          * FCP IO.  Any other IO is slow path and is handled by
3988          * the worker thread.
3989          */
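             /*
              * Each ring owns four attention bits in HA, hence the 4*ring shift.
              */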
3990         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
3991         status >>= (4*LPFC_FCP_RING);
3992         if (status & HA_RXMASK)
3993                 lpfc_sli_handle_fast_ring_event(phba,
3994                                                 &phba->sli.ring[LPFC_FCP_RING],
3995                                                 status);
3996
3997         if (phba->cfg_multi_ring_support == 2) {
3998                 /*
3999                  * Process all events on extra ring.  Take the optimized path
4000                  * for extra ring IO.  Any other IO is slow path and is handled
4001                  * by the worker thread.
4002                  */
4003                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
4004                 status >>= (4*LPFC_EXTRA_RING);
4005                 if (status & HA_RXMASK) {
4006                         lpfc_sli_handle_fast_ring_event(phba,
4007                                         &phba->sli.ring[LPFC_EXTRA_RING],
4008                                         status);
4009                 }
4010         }
4011         return IRQ_HANDLED;
4012
4013 } /* lpfc_intr_handler */