[SCSI] qla2xxx: Don't fallback to interrupt-polling during re-initialization with...
[linux-2.6] drivers/scsi/qla2xxx/qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: interrupt number
24  * @dev_id: SCSI driver HA context
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33         scsi_qla_host_t *vha;
34         struct qla_hw_data *ha;
35         struct device_reg_2xxx __iomem *reg;
36         int             status;
37         unsigned long   iter;
38         uint16_t        hccr;
39         uint16_t        mb[4];
40         struct rsp_que *rsp;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock(&ha->hardware_lock);
54         vha = qla2x00_get_rsp_host(rsp);
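        /* Bound the service loop so a stuck interrupt cannot hog the CPU. */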
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
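                        /*
                         * Mailbox0 in the 0x4000-0x7fff range indicates a
                         * mailbox command completion; 0x8000-0xbfff indicates
                         * an asynchronous event, decoded below.
                         */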
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock(&ha->hardware_lock);
106
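        /* Wake up the mailbox command path sleeping on mbx_intr_comp. */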
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: SCSI driver HA context
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137
138         rsp = (struct rsp_que *) dev_id;
139         if (!rsp) {
140                 printk(KERN_INFO
141                     "%s(): NULL response queue pointer\n", __func__);
142                 return (IRQ_NONE);
143         }
144
145         ha = rsp->hw;
146         reg = &ha->iobase->isp;
147         status = 0;
148
149         spin_lock(&ha->hardware_lock);
150         vha = qla2x00_get_rsp_host(rsp);
151         for (iter = 50; iter--; ) {
152                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153                 if (stat & HSR_RISC_PAUSED) {
154                         if (pci_channel_offline(ha->pdev))
155                                 break;
156
157                         hccr = RD_REG_WORD(&reg->hccr);
158                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
159                                 qla_printk(KERN_INFO, ha, "Parity error -- "
160                                     "HCCR=%x, Dumping firmware!\n", hccr);
161                         else
162                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
163                                     "HCCR=%x, Dumping firmware!\n", hccr);
164
165                         /*
166                          * Issue a "HARD" reset in order for the RISC
167                          * interrupt bit to be cleared.  Schedule a big
168                          * hammer to get out of the RISC PAUSED state.
169                          */
170                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171                         RD_REG_WORD(&reg->hccr);
172
173                         ha->isp_ops->fw_dump(vha, 1);
174                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175                         break;
176                 } else if ((stat & HSR_RISC_INT) == 0)
177                         break;
178
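                /*
                 * The low byte of host_status identifies the interrupt
                 * source: 0x1/0x2/0x10/0x11 mailbox completion, 0x12
                 * asynchronous event, 0x13 response queue update, 0x15/0x16
                 * fast-post completions handed off as asynchronous events.
                 */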
179                 switch (stat & 0xff) {
180                 case 0x1:
181                 case 0x2:
182                 case 0x10:
183                 case 0x11:
184                         qla2x00_mbx_completion(vha, MSW(stat));
185                         status |= MBX_INTERRUPT;
186
187                         /* Release mailbox registers. */
188                         WRT_REG_WORD(&reg->semaphore, 0);
189                         break;
190                 case 0x12:
191                         mb[0] = MSW(stat);
192                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195                         qla2x00_async_event(vha, rsp, mb);
196                         break;
197                 case 0x13:
198                         qla2x00_process_response_queue(rsp);
199                         break;
200                 case 0x15:
201                         mb[0] = MBA_CMPLT_1_16BIT;
202                         mb[1] = MSW(stat);
203                         qla2x00_async_event(vha, rsp, mb);
204                         break;
205                 case 0x16:
206                         mb[0] = MBA_SCSI_COMPLETION;
207                         mb[1] = MSW(stat);
208                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209                         qla2x00_async_event(vha, rsp, mb);
210                         break;
211                 default:
212                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
213                             "(%d).\n",
214                             vha->host_no, stat & 0xff));
215                         break;
216                 }
217                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218                 RD_REG_WORD_RELAXED(&reg->hccr);
219         }
220         spin_unlock(&ha->hardware_lock);
221
222         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
224                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
225                 complete(&ha->mbx_intr_comp);
226         }
227
228         return (IRQ_HANDLED);
229 }
230
231 /**
232  * qla2x00_mbx_completion() - Process mailbox command completions.
233  * @vha: SCSI driver HA context
234  * @mb0: Mailbox0 register
235  */
236 static void
237 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
238 {
239         uint16_t        cnt;
240         uint16_t __iomem *wptr;
241         struct qla_hw_data *ha = vha->hw;
242         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
243
244         /* Load return mailbox registers. */
245         ha->flags.mbox_int = 1;
246         ha->mailbox_out[0] = mb0;
247         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
249         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
250                 if (IS_QLA2200(ha) && cnt == 8)
251                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252                 if (cnt == 4 || cnt == 5)
253                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254                 else
255                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
256
257                 wptr++;
258         }
259
260         if (ha->mcp) {
261                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
262                     __func__, vha->host_no, ha->mcp->mb[0]));
263         } else {
264                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
265                     __func__, vha->host_no));
266         }
267 }
268
269 /**
270  * qla2x00_async_event() - Process asynchronous events.
271  * @vha: SCSI driver HA context
272  * @mb: Mailbox registers (0 - 3)
273  */
274 void
275 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276 {
277 #define LS_UNKNOWN      2
278         static char     *link_speeds[5] = { "1", "2", "?", "4", "8" };
279         char            *link_speed;
280         uint16_t        handle_cnt;
281         uint16_t        cnt;
282         uint32_t        handles[5];
283         struct qla_hw_data *ha = vha->hw;
284         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
285         uint32_t        rscn_entry, host_pid;
286         uint8_t         rscn_queue_index;
287         unsigned long   flags;
288
289         /* Setup to process RIO completion. */
290         handle_cnt = 0;
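        /*
         * RIO completions report one to five I/O handles through the mailbox
         * registers; unpack them and normalize mb[0] to MBA_SCSI_COMPLETION
         * so the handler below treats them as fast-post completions.
         */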
291         switch (mb[0]) {
292         case MBA_SCSI_COMPLETION:
293                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
294                 handle_cnt = 1;
295                 break;
296         case MBA_CMPLT_1_16BIT:
297                 handles[0] = mb[1];
298                 handle_cnt = 1;
299                 mb[0] = MBA_SCSI_COMPLETION;
300                 break;
301         case MBA_CMPLT_2_16BIT:
302                 handles[0] = mb[1];
303                 handles[1] = mb[2];
304                 handle_cnt = 2;
305                 mb[0] = MBA_SCSI_COMPLETION;
306                 break;
307         case MBA_CMPLT_3_16BIT:
308                 handles[0] = mb[1];
309                 handles[1] = mb[2];
310                 handles[2] = mb[3];
311                 handle_cnt = 3;
312                 mb[0] = MBA_SCSI_COMPLETION;
313                 break;
314         case MBA_CMPLT_4_16BIT:
315                 handles[0] = mb[1];
316                 handles[1] = mb[2];
317                 handles[2] = mb[3];
318                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
319                 handle_cnt = 4;
320                 mb[0] = MBA_SCSI_COMPLETION;
321                 break;
322         case MBA_CMPLT_5_16BIT:
323                 handles[0] = mb[1];
324                 handles[1] = mb[2];
325                 handles[2] = mb[3];
326                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
327                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
328                 handle_cnt = 5;
329                 mb[0] = MBA_SCSI_COMPLETION;
330                 break;
331         case MBA_CMPLT_2_32BIT:
332                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
333                 handles[1] = le32_to_cpu(
334                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
335                     RD_MAILBOX_REG(ha, reg, 6));
336                 handle_cnt = 2;
337                 mb[0] = MBA_SCSI_COMPLETION;
338                 break;
339         default:
340                 break;
341         }
342
343         switch (mb[0]) {
344         case MBA_SCSI_COMPLETION:       /* Fast Post */
345                 if (!vha->flags.online)
346                         break;
347
348                 for (cnt = 0; cnt < handle_cnt; cnt++)
349                         qla2x00_process_completed_request(vha, rsp->req,
350                                 handles[cnt]);
351                 break;
352
353         case MBA_RESET:                 /* Reset */
354                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355                         vha->host_no));
356
357                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
358                 break;
359
360         case MBA_SYSTEM_ERR:            /* System Error */
361                 qla_printk(KERN_INFO, ha,
362                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363                     mb[1], mb[2], mb[3]);
364
365                 ha->isp_ops->fw_dump(vha, 1);
366
367                 if (IS_FWI2_CAPABLE(ha)) {
368                         if (mb[1] == 0 && mb[2] == 0) {
369                                 qla_printk(KERN_ERR, ha,
370                                     "Unrecoverable Hardware Error: adapter "
371                                     "marked OFFLINE!\n");
372                                 vha->flags.online = 0;
373                         } else
374                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
375                 } else if (mb[1] == 0) {
376                         qla_printk(KERN_INFO, ha,
377                             "Unrecoverable Hardware Error: adapter marked "
378                             "OFFLINE!\n");
379                         vha->flags.online = 0;
380                 } else
381                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
382                 break;
383
384         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
385                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
386                     vha->host_no));
387                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
388
389                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
390                 break;
391
392         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
393                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
394                     vha->host_no));
395                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
396
397                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
398                 break;
399
400         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
401                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
402                     vha->host_no));
403                 break;
404
405         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
406                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
407                     mb[1]));
408                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
409
410                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
411                         atomic_set(&vha->loop_state, LOOP_DOWN);
412                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
413                         qla2x00_mark_all_devices_lost(vha, 1);
414                 }
415
416                 if (vha->vp_idx) {
417                         atomic_set(&vha->vp_state, VP_FAILED);
418                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
419                 }
420
421                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
422                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
423
424                 vha->flags.management_server_logged_in = 0;
425                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
426                 break;
427
428         case MBA_LOOP_UP:               /* Loop Up Event */
429                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
430                         link_speed = link_speeds[0];
431                         ha->link_data_rate = PORT_SPEED_1GB;
432                 } else {
433                         link_speed = link_speeds[LS_UNKNOWN];
434                         if (mb[1] < 5)
435                                 link_speed = link_speeds[mb[1]];
436                         ha->link_data_rate = mb[1];
437                 }
438
439                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
440                     vha->host_no, link_speed));
441                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
442                     link_speed);
443
444                 vha->flags.management_server_logged_in = 0;
445                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
446                 break;
447
448         case MBA_LOOP_DOWN:             /* Loop Down Event */
449                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
450                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
451                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
452                     mb[1], mb[2], mb[3]);
453
454                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
455                         atomic_set(&vha->loop_state, LOOP_DOWN);
456                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
457                         vha->device_flags |= DFLG_NO_CABLE;
458                         qla2x00_mark_all_devices_lost(vha, 1);
459                 }
460
461                 if (vha->vp_idx) {
462                         atomic_set(&vha->vp_state, VP_FAILED);
463                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
464                 }
465
466                 vha->flags.management_server_logged_in = 0;
467                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
468                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
469                 break;
470
471         case MBA_LIP_RESET:             /* LIP reset occurred */
472                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
473                     vha->host_no, mb[1]));
474                 qla_printk(KERN_INFO, ha,
475                     "LIP reset occurred (%x).\n", mb[1]);
476
477                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
478                         atomic_set(&vha->loop_state, LOOP_DOWN);
479                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
480                         qla2x00_mark_all_devices_lost(vha, 1);
481                 }
482
483                 if (vha->vp_idx) {
484                         atomic_set(&vha->vp_state, VP_FAILED);
485                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
486                 }
487
488                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
489
490                 ha->operating_mode = LOOP;
491                 vha->flags.management_server_logged_in = 0;
492                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
493                 break;
494
495         case MBA_POINT_TO_POINT:        /* Point-to-Point */
496                 if (IS_QLA2100(ha))
497                         break;
498
499                 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
500                     vha->host_no));
501
502                 /*
503                  * Until there's a transition from loop down to loop up, treat
504                  * this as loop down only.
505                  */
506                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
507                         atomic_set(&vha->loop_state, LOOP_DOWN);
508                         if (!atomic_read(&vha->loop_down_timer))
509                                 atomic_set(&vha->loop_down_timer,
510                                     LOOP_DOWN_TIME);
511                         qla2x00_mark_all_devices_lost(vha, 1);
512                 }
513
514                 if (vha->vp_idx) {
515                         atomic_set(&vha->vp_state, VP_FAILED);
516                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
517                 }
518
519                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
520                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
521
522                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
523                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
524
525                 ha->flags.gpsc_supported = 1;
526                 vha->flags.management_server_logged_in = 0;
527                 break;
528
529         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
530                 if (IS_QLA2100(ha))
531                         break;
532
533                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
534                     "received.\n",
535                     vha->host_no));
536                 qla_printk(KERN_INFO, ha,
537                     "Configuration change detected: value=%x.\n", mb[1]);
538
539                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
540                         atomic_set(&vha->loop_state, LOOP_DOWN);
541                         if (!atomic_read(&vha->loop_down_timer))
542                                 atomic_set(&vha->loop_down_timer,
543                                     LOOP_DOWN_TIME);
544                         qla2x00_mark_all_devices_lost(vha, 1);
545                 }
546
547                 if (vha->vp_idx) {
548                         atomic_set(&vha->vp_state, VP_FAILED);
549                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
550                 }
551
552                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
553                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
554                 break;
555
556         case MBA_PORT_UPDATE:           /* Port database update */
557                 /* Only handle SCNs for our Vport index. */
558                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
559                         break;
560
561                 /*
562                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
563                  * event etc. earlier indicating loop is down) then process
564                  * it.  Otherwise ignore it and Wait for RSCN to come in.
565                  */
566                 atomic_set(&vha->loop_down_timer, 0);
567                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
568                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
569                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
570                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
571                             mb[2], mb[3]));
572                         break;
573                 }
574
575                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
576                     vha->host_no));
577                 DEBUG(printk(KERN_INFO
578                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
579                     vha->host_no, mb[1], mb[2], mb[3]));
580
581                 /*
582                  * Mark all devices as missing so we will login again.
583                  */
584                 atomic_set(&vha->loop_state, LOOP_UP);
585
586                 qla2x00_mark_all_devices_lost(vha, 1);
587
588                 vha->flags.rscn_queue_overflow = 1;
589
590                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
591                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
592                 break;
593
594         case MBA_RSCN_UPDATE:           /* State Change Registration */
595                 /* Check if the Vport has issued a SCR */
596                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
597                         break;
598                 /* Only handle SCNs for our Vport index. */
599                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
600                         break;
601                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
602                     vha->host_no));
603                 DEBUG(printk(KERN_INFO
604                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
605                     vha->host_no, mb[1], mb[2], mb[3]));
606
607                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
608                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
609                                 | vha->d_id.b.al_pa;
610                 if (rscn_entry == host_pid) {
611                         DEBUG(printk(KERN_INFO
612                             "scsi(%ld): Ignoring RSCN update to local host "
613                             "port ID (%06x)\n",
614                             vha->host_no, host_pid));
615                         break;
616                 }
617
618                 /* Ignore reserved bits from RSCN-payload. */
619                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
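                /*
                 * Queue the entry in the circular rscn_queue; if the queue
                 * is full, flag rscn_queue_overflow rather than silently
                 * dropping the notification.
                 */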
620                 rscn_queue_index = vha->rscn_in_ptr + 1;
621                 if (rscn_queue_index == MAX_RSCN_COUNT)
622                         rscn_queue_index = 0;
623                 if (rscn_queue_index != vha->rscn_out_ptr) {
624                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
625                         vha->rscn_in_ptr = rscn_queue_index;
626                 } else {
627                         vha->flags.rscn_queue_overflow = 1;
628                 }
629
630                 atomic_set(&vha->loop_state, LOOP_UPDATE);
631                 atomic_set(&vha->loop_down_timer, 0);
632                 vha->flags.management_server_logged_in = 0;
633
634                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
635                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
636                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
637                 break;
638
639         /* case MBA_RIO_RESPONSE: */
640         case MBA_ZIO_RESPONSE:
641                 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
642                     vha->host_no));
643                 DEBUG(printk(KERN_INFO
644                     "scsi(%ld): [R|Z]IO update completion.\n",
645                     vha->host_no));
646
647                 if (IS_FWI2_CAPABLE(ha))
648                         qla24xx_process_response_queue(rsp);
649                 else
650                         qla2x00_process_response_queue(rsp);
651                 break;
652
653         case MBA_DISCARD_RND_FRAME:
654                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
655                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
656                 break;
657
658         case MBA_TRACE_NOTIFICATION:
659                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
660                 vha->host_no, mb[1], mb[2]));
661                 break;
662
663         case MBA_ISP84XX_ALERT:
664                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
665                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
666
667                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
668                 switch (mb[1]) {
669                 case A84_PANIC_RECOVERY:
670                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
671                             "%04x %04x\n", mb[2], mb[3]);
672                         break;
673                 case A84_OP_LOGIN_COMPLETE:
674                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
675                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
676                             "firmware version %x\n", ha->cs84xx->op_fw_version));
677                         break;
678                 case A84_DIAG_LOGIN_COMPLETE:
679                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
680                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
681                             "diagnostic firmware version %x\n",
682                             ha->cs84xx->diag_fw_version));
683                         break;
684                 case A84_GOLD_LOGIN_COMPLETE:
685                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
686                         ha->cs84xx->fw_update = 1;
687                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
688                             "firmware version %x\n",
689                             ha->cs84xx->gold_fw_version));
690                         break;
691                 default:
692                         qla_printk(KERN_ERR, ha,
693                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
694                             mb[1], mb[2], mb[3]);
695                 }
696                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
697                 break;
698         }
699
700         if (!vha->vp_idx && ha->num_vhosts)
701                 qla2x00_alert_all_vps(rsp, mb);
702 }
703
704 static void
705 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
706 {
707         fc_port_t *fcport = data;
708         struct scsi_qla_host *vha = fcport->vha;
709         struct qla_hw_data *ha = vha->hw;
710         struct req_que *req = NULL;
711
712         req = ha->req_q_map[vha->req_ques[0]];
713         if (!req)
714                 return;
715         if (req->max_q_depth <= sdev->queue_depth)
716                 return;
717
718         if (sdev->ordered_tags)
719                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
720                     sdev->queue_depth + 1);
721         else
722                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
723                     sdev->queue_depth + 1);
724
725         fcport->last_ramp_up = jiffies;
726
727         DEBUG2(qla_printk(KERN_INFO, ha,
728             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
729             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
730             sdev->queue_depth));
731 }
732
733 static void
734 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
735 {
736         fc_port_t *fcport = data;
737
738         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
739                 return;
740
741         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
742             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
743             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
744             sdev->queue_depth));
745 }
746
747 static inline void
748 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
749                                                                 srb_t *sp)
750 {
751         fc_port_t *fcport;
752         struct scsi_device *sdev;
753
754         sdev = sp->cmd->device;
755         if (sdev->queue_depth >= req->max_q_depth)
756                 return;
757
758         fcport = sp->fcport;
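        /*
         * Throttle ramp-ups: do nothing if a ramp-up or a queue-full event
         * occurred on this port within the last ql2xqfullrampup seconds.
         */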
759         if (time_before(jiffies,
760             fcport->last_ramp_up + ql2xqfullrampup * HZ))
761                 return;
762         if (time_before(jiffies,
763             fcport->last_queue_full + ql2xqfullrampup * HZ))
764                 return;
765
766         starget_for_each_device(sdev->sdev_target, fcport,
767             qla2x00_adjust_sdev_qdepth_up);
768 }
769
770 /**
771  * qla2x00_process_completed_request() - Process a Fast Post response.
772  * @vha: SCSI driver HA context
773  * @index: SRB index
774  */
775 static void
776 qla2x00_process_completed_request(struct scsi_qla_host *vha,
777                                 struct req_que *req, uint32_t index)
778 {
779         srb_t *sp;
780         struct qla_hw_data *ha = vha->hw;
781
782         /* Validate handle. */
783         if (index >= MAX_OUTSTANDING_COMMANDS) {
784                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
785                     vha->host_no, index));
786                 qla_printk(KERN_WARNING, ha,
787                     "Invalid SCSI completion handle %d.\n", index);
788
789                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
790                 return;
791         }
792
793         sp = req->outstanding_cmds[index];
794         if (sp) {
795                 /* Free outstanding command slot. */
796                 req->outstanding_cmds[index] = NULL;
797
798                 CMD_COMPL_STATUS(sp->cmd) = 0L;
799                 CMD_SCSI_STATUS(sp->cmd) = 0L;
800
801                 /* Save ISP completion status */
802                 sp->cmd->result = DID_OK << 16;
803
804                 qla2x00_ramp_up_queue_depth(vha, req, sp);
805                 qla2x00_sp_compl(ha, sp);
806         } else {
807                 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
808                     vha->host_no));
809                 qla_printk(KERN_WARNING, ha,
810                     "Invalid ISP SCSI completion handle\n");
811
812                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
813         }
814 }
815
816 /**
817  * qla2x00_process_response_queue() - Process response queue entries.
818  * @rsp: response queue
819  */
820 void
821 qla2x00_process_response_queue(struct rsp_que *rsp)
822 {
823         struct scsi_qla_host *vha;
824         struct qla_hw_data *ha = rsp->hw;
825         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
826         sts_entry_t     *pkt;
827         uint16_t        handle_cnt;
828         uint16_t        cnt;
829
830         vha = qla2x00_get_rsp_host(rsp);
831
832         if (!vha->flags.online)
833                 return;
834
835         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
836                 pkt = (sts_entry_t *)rsp->ring_ptr;
837
838                 rsp->ring_index++;
839                 if (rsp->ring_index == rsp->length) {
840                         rsp->ring_index = 0;
841                         rsp->ring_ptr = rsp->ring;
842                 } else {
843                         rsp->ring_ptr++;
844                 }
845
846                 if (pkt->entry_status != 0) {
847                         DEBUG3(printk(KERN_INFO
848                             "scsi(%ld): Process error entry.\n", vha->host_no));
849
850                         qla2x00_error_entry(vha, rsp, pkt);
851                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
852                         wmb();
853                         continue;
854                 }
855
856                 switch (pkt->entry_type) {
857                 case STATUS_TYPE:
858                         qla2x00_status_entry(vha, rsp, pkt);
859                         break;
860                 case STATUS_TYPE_21:
861                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
862                         for (cnt = 0; cnt < handle_cnt; cnt++) {
863                                 qla2x00_process_completed_request(vha, rsp->req,
864                                     ((sts21_entry_t *)pkt)->handle[cnt]);
865                         }
866                         break;
867                 case STATUS_TYPE_22:
868                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
869                         for (cnt = 0; cnt < handle_cnt; cnt++) {
870                                 qla2x00_process_completed_request(vha, rsp->req,
871                                     ((sts22_entry_t *)pkt)->handle[cnt]);
872                         }
873                         break;
874                 case STATUS_CONT_TYPE:
875                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
876                         break;
877                 default:
878                         /* Type Not Supported. */
879                         DEBUG4(printk(KERN_WARNING
880                             "scsi(%ld): Received unknown response pkt type %x "
881                             "entry status=%x.\n",
882                             vha->host_no, pkt->entry_type, pkt->entry_status));
883                         break;
884                 }
885                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
886                 wmb();
887         }
888
889         /* Adjust ring index */
890         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
891 }
892
893 static inline void
894 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
895 {
896         struct scsi_cmnd *cp = sp->cmd;
897
898         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
899                 sense_len = SCSI_SENSE_BUFFERSIZE;
900
901         CMD_ACTUAL_SNSLEN(cp) = sense_len;
902         sp->request_sense_length = sense_len;
903         sp->request_sense_ptr = cp->sense_buffer;
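        /*
         * Only 32 bytes of sense data are copied from the status IOCB here;
         * any remainder is delivered in status-continuation entries and
         * copied by qla2x00_status_cont_entry() via vha->status_srb.
         */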
904         if (sp->request_sense_length > 32)
905                 sense_len = 32;
906
907         memcpy(cp->sense_buffer, sense_data, sense_len);
908
909         sp->request_sense_ptr += sense_len;
910         sp->request_sense_length -= sense_len;
911         if (sp->request_sense_length != 0)
912                 sp->fcport->vha->status_srb = sp;
913
914         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
915             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
916             cp->device->channel, cp->device->id, cp->device->lun, cp,
917             cp->serial_number));
918         if (sense_len)
919                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
920                     CMD_ACTUAL_SNSLEN(cp)));
921 }
922
923 /**
924  * qla2x00_status_entry() - Process a Status IOCB entry.
925  * @vha: SCSI driver HA context
926  * @pkt: Entry pointer
927  */
928 static void
929 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
930 {
931         srb_t           *sp;
932         fc_port_t       *fcport;
933         struct scsi_cmnd *cp;
934         sts_entry_t *sts;
935         struct sts_entry_24xx *sts24;
936         uint16_t        comp_status;
937         uint16_t        scsi_status;
938         uint8_t         lscsi_status;
939         int32_t         resid;
940         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
941         uint8_t         *rsp_info, *sense_data;
942         struct qla_hw_data *ha = vha->hw;
943         struct req_que *req = rsp->req;
944
945         sts = (sts_entry_t *) pkt;
946         sts24 = (struct sts_entry_24xx *) pkt;
947         if (IS_FWI2_CAPABLE(ha)) {
948                 comp_status = le16_to_cpu(sts24->comp_status);
949                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
950         } else {
951                 comp_status = le16_to_cpu(sts->comp_status);
952                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
953         }
954
955         /* Fast path completion. */
956         if (comp_status == CS_COMPLETE && scsi_status == 0) {
957                 qla2x00_process_completed_request(vha, req, sts->handle);
958
959                 return;
960         }
961
962         /* Validate handle. */
963         if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
964                 sp = req->outstanding_cmds[sts->handle];
965                 req->outstanding_cmds[sts->handle] = NULL;
966         } else
967                 sp = NULL;
968
969         if (sp == NULL) {
970                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
971                     vha->host_no));
972                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
973
974                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
975                 qla2xxx_wake_dpc(vha);
976                 return;
977         }
978         cp = sp->cmd;
979         if (cp == NULL) {
980                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
981                     "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
982                 qla_printk(KERN_WARNING, ha,
983                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
984
985                 return;
986         }
987
988         lscsi_status = scsi_status & STATUS_MASK;
989         CMD_ENTRY_STATUS(cp) = sts->entry_status;
990         CMD_COMPL_STATUS(cp) = comp_status;
991         CMD_SCSI_STATUS(cp) = scsi_status;
992
993         fcport = sp->fcport;
994
995         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
996         if (IS_FWI2_CAPABLE(ha)) {
997                 sense_len = le32_to_cpu(sts24->sense_len);
998                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
999                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1000                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1001                 rsp_info = sts24->data;
1002                 sense_data = sts24->data;
1003                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1004         } else {
1005                 sense_len = le16_to_cpu(sts->req_sense_length);
1006                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1007                 resid_len = le32_to_cpu(sts->residual_length);
1008                 rsp_info = sts->rsp_info;
1009                 sense_data = sts->req_sense_data;
1010         }
1011
1012         /* Check for any FCP transport errors. */
1013         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1014                 /* Sense data lies beyond any FCP RESPONSE data. */
1015                 if (IS_FWI2_CAPABLE(ha))
1016                         sense_data += rsp_info_len;
1017                 if (rsp_info_len > 3 && rsp_info[3]) {
1018                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1019                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1020                             "retrying command\n", vha->host_no,
1021                             cp->device->channel, cp->device->id,
1022                             cp->device->lun, rsp_info_len, rsp_info[0],
1023                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1024                             rsp_info[5], rsp_info[6], rsp_info[7]));
1025
1026                         cp->result = DID_BUS_BUSY << 16;
1027                         qla2x00_sp_compl(ha, sp);
1028                         return;
1029                 }
1030         }
1031
1032         /* Check for overrun. */
1033         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1034             scsi_status & SS_RESIDUAL_OVER)
1035                 comp_status = CS_DATA_OVERRUN;
1036
1037         /*
1038          * Based on host and SCSI status, generate a status code for Linux.
1039          */
1040         switch (comp_status) {
1041         case CS_COMPLETE:
1042         case CS_QUEUE_FULL:
1043                 if (scsi_status == 0) {
1044                         cp->result = DID_OK << 16;
1045                         break;
1046                 }
1047                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1048                         resid = resid_len;
1049                         scsi_set_resid(cp, resid);
1050                         CMD_RESID_LEN(cp) = resid;
1051
1052                         if (!lscsi_status &&
1053                             ((unsigned)(scsi_bufflen(cp) - resid) <
1054                              cp->underflow)) {
1055                                 qla_printk(KERN_INFO, ha,
1056                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1057                                            "detected (%x of %x bytes)...returning "
1058                                            "error status.\n", vha->host_no,
1059                                            cp->device->channel, cp->device->id,
1060                                            cp->device->lun, resid,
1061                                            scsi_bufflen(cp));
1062
1063                                 cp->result = DID_ERROR << 16;
1064                                 break;
1065                         }
1066                 }
1067                 cp->result = DID_OK << 16 | lscsi_status;
1068
1069                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1070                         DEBUG2(printk(KERN_INFO
1071                             "scsi(%ld): QUEUE FULL status detected "
1072                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1073                             scsi_status));
1074
1075                         /* Adjust queue depth for all luns on the port. */
1076                         fcport->last_queue_full = jiffies;
1077                         starget_for_each_device(cp->device->sdev_target,
1078                             fcport, qla2x00_adjust_sdev_qdepth_down);
1079                         break;
1080                 }
1081                 if (lscsi_status != SS_CHECK_CONDITION)
1082                         break;
1083
1084                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1085                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1086                         break;
1087
1088                 qla2x00_handle_sense(sp, sense_data, sense_len);
1089                 break;
1090
1091         case CS_DATA_UNDERRUN:
1092                 resid = resid_len;
1093                 /* Use F/W calculated residual length. */
1094                 if (IS_FWI2_CAPABLE(ha)) {
1095                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1096                                 lscsi_status = 0;
1097                         } else if (resid != fw_resid_len) {
1098                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1099                                 lscsi_status = 0;
1100                         }
1101                         resid = fw_resid_len;
1102                 }
1103
1104                 if (scsi_status & SS_RESIDUAL_UNDER) {
1105                         scsi_set_resid(cp, resid);
1106                         CMD_RESID_LEN(cp) = resid;
1107                 } else {
1108                         DEBUG2(printk(KERN_INFO
1109                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1110                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1111                             "os_underflow=0x%x\n", vha->host_no,
1112                             cp->device->id, cp->device->lun, comp_status,
1113                             scsi_status, resid_len, resid, cp->cmnd[0],
1114                             cp->underflow));
1115
1116                 }
1117
1118                 /*
1119                  * Check to see if SCSI Status is non zero. If so report SCSI
1120                  * Status.
1121                  */
1122                 if (lscsi_status != 0) {
1123                         cp->result = DID_OK << 16 | lscsi_status;
1124
1125                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1126                                 DEBUG2(printk(KERN_INFO
1127                                     "scsi(%ld): QUEUE FULL status detected "
1128                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1129                                     scsi_status));
1130
1131                                 /*
1132                                  * Adjust queue depth for all luns on the
1133                                  * port.
1134                                  */
1135                                 fcport->last_queue_full = jiffies;
1136                                 starget_for_each_device(
1137                                     cp->device->sdev_target, fcport,
1138                                     qla2x00_adjust_sdev_qdepth_down);
1139                                 break;
1140                         }
1141                         if (lscsi_status != SS_CHECK_CONDITION)
1142                                 break;
1143
1144                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1145                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1146                                 break;
1147
1148                         qla2x00_handle_sense(sp, sense_data, sense_len);
1149                 } else {
1150                         /*
1151                          * If RISC reports underrun and target does not report
1152                          * it then we must have a lost frame, so tell upper
1153                          * layer to retry it by reporting a bus busy.
1154                          */
1155                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1156                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1157                                               "frame(s) detected (%x of %x bytes)..."
1158                                               "retrying command.\n",
1159                                         vha->host_no, cp->device->channel,
1160                                         cp->device->id, cp->device->lun, resid,
1161                                         scsi_bufflen(cp)));
1162
1163                                 cp->result = DID_BUS_BUSY << 16;
1164                                 break;
1165                         }
1166
1167                         /* Handle mid-layer underflow */
1168                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1169                             cp->underflow) {
1170                                 qla_printk(KERN_INFO, ha,
1171                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1172                                            "detected (%x of %x bytes)...returning "
1173                                            "error status.\n", vha->host_no,
1174                                            cp->device->channel, cp->device->id,
1175                                            cp->device->lun, resid,
1176                                            scsi_bufflen(cp));
1177
1178                                 cp->result = DID_ERROR << 16;
1179                                 break;
1180                         }
1181
1182                         /* Everybody online, looking good... */
1183                         cp->result = DID_OK << 16;
1184                 }
1185                 break;
1186
1187         case CS_DATA_OVERRUN:
1188                 DEBUG2(printk(KERN_INFO
1189                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1190                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1191                     scsi_status));
1192                 DEBUG2(printk(KERN_INFO
1193                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1194                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1195                     cp->cmnd[4], cp->cmnd[5]));
1196                 DEBUG2(printk(KERN_INFO
1197                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1198                     "status!\n",
1199                     cp->serial_number, scsi_bufflen(cp), resid_len));
1200
1201                 cp->result = DID_ERROR << 16;
1202                 break;
1203
1204         case CS_PORT_LOGGED_OUT:
1205         case CS_PORT_CONFIG_CHG:
1206         case CS_PORT_BUSY:
1207         case CS_INCOMPLETE:
1208         case CS_PORT_UNAVAILABLE:
1209                 /*
1210                  * The port is logged out, busy, or otherwise unavailable;
1211                  * log the completion and port state, then let the FC
1212                  * transport class drive recovery (see below).
1213                  */
1214                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1215                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1216                     vha->host_no, cp->device->id, cp->device->lun,
1217                     cp->serial_number, comp_status,
1218                     atomic_read(&fcport->state)));
1219
1220                 /*
1221                  * We are going to have the fc class block the rport
1222                  * while we try to recover so instruct the mid layer
1223                  * to requeue until the class decides how to handle this.
1224                  */
1225                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1226                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1227                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1228                 break;
1229
1230         case CS_RESET:
1231                 DEBUG2(printk(KERN_INFO
1232                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1233                     vha->host_no, comp_status, scsi_status));
1234
1235                 cp->result = DID_RESET << 16;
1236                 break;
1237
1238         case CS_ABORTED:
1239                 /*
1240                  * hv2.19.12 - DID_ABORT does not retry the request. If we
1241                  * aborted this request, then abort; otherwise it must be a
1242                  * reset.
1243                  */
1244                 DEBUG2(printk(KERN_INFO
1245                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1246                     vha->host_no, comp_status, scsi_status));
1247
1248                 cp->result = DID_RESET << 16;
1249                 break;
1250
1251         case CS_TIMEOUT:
1252                 /*
1253                  * We are going to have the fc class block the rport
1254                  * while we try to recover so instruct the mid layer
1255                  * to requeue until the class decides how to handle this.
1256                  */
1257                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1258
1259                 if (IS_FWI2_CAPABLE(ha)) {
1260                         DEBUG2(printk(KERN_INFO
1261                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1262                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1263                             cp->device->id, cp->device->lun, comp_status,
1264                             scsi_status));
1265                         break;
1266                 }
1267                 DEBUG2(printk(KERN_INFO
1268                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1269                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1270                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1271                     le16_to_cpu(sts->status_flags)));
1272
1273                 /* Check to see if logout occurred. */
1274                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1275                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1276                 break;
1277
1278         default:
1279                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1280                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1281                 qla_printk(KERN_INFO, ha,
1282                     "Unknown status detected 0x%x-0x%x.\n",
1283                     comp_status, scsi_status);
1284
1285                 cp->result = DID_ERROR << 16;
1286                 break;
1287         }
1288
1289         /* Place command on done queue. */
1290         if (vha->status_srb == NULL)
1291                 qla2x00_sp_compl(ha, sp);
1292 }
1293
1294 /**
1295  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1296  * @vha: SCSI driver HA context
1297  * @pkt: Entry pointer
1298  *
1299  * Extended sense data.
1300  */
1301 static void
1302 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1303 {
1304         uint8_t         sense_sz = 0;
1305         struct qla_hw_data *ha = vha->hw;
1306         srb_t           *sp = vha->status_srb;
1307         struct scsi_cmnd *cp;
1308
1309         if (sp != NULL && sp->request_sense_length != 0) {
1310                 cp = sp->cmd;
1311                 if (cp == NULL) {
1312                         DEBUG2(printk("%s(): Cmd already returned to OS "
1313                             "sp=%p.\n", __func__, sp));
1314                         qla_printk(KERN_INFO, ha,
1315                             "cmd is NULL: already returned to OS (sp=%p)\n",
1316                             sp);
1317
1318                         vha->status_srb = NULL;
1319                         return;
1320                 }
1321
1322                 if (sp->request_sense_length > sizeof(pkt->data)) {
1323                         sense_sz = sizeof(pkt->data);
1324                 } else {
1325                         sense_sz = sp->request_sense_length;
1326                 }
1327
1328                 /* Move sense data. */
1329                 if (IS_FWI2_CAPABLE(ha))
1330                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1331                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1332                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1333
1334                 sp->request_sense_ptr += sense_sz;
1335                 sp->request_sense_length -= sense_sz;
1336
1337                 /* Place command on done queue. */
1338                 if (sp->request_sense_length == 0) {
1339                         vha->status_srb = NULL;
1340                         qla2x00_sp_compl(ha, sp);
1341                 }
1342         }
1343 }
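
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the driver.  It shows how the sense-data bookkeeping consumed by
 * qla2x00_status_cont_entry() above is typically primed: the first chunk of
 * sense data arrives with the status entry, and anything beyond it is
 * delivered through Status Continuation entries.  The helper name and its
 * 'sense'/'sense_len'/'copied' parameters are hypothetical; the srb_t and
 * scsi_qla_host_t fields match the ones used in this file.
 */
static inline void
qla2x00_prime_sense_sketch(scsi_qla_host_t *vha, srb_t *sp,
    uint8_t *sense, uint32_t sense_len, uint32_t copied)
{
        struct scsi_cmnd *cp = sp->cmd;

        /* Never copy more than the midlayer sense buffer can hold. */
        sense_len = min_t(uint32_t, sense_len, SCSI_SENSE_BUFFERSIZE);

        /* The first chunk travels inside the status entry itself. */
        memcpy(cp->sense_buffer, sense, min(copied, sense_len));

        /*
         * Anything left over arrives in Status Continuation entries; record
         * where to append and how much is still expected so
         * qla2x00_status_cont_entry() can finish the transfer and complete
         * the command.
         */
        if (copied < sense_len) {
                sp->request_sense_ptr = cp->sense_buffer + copied;
                sp->request_sense_length = sense_len - copied;
                vha->status_srb = sp;
        } else {
                vha->status_srb = NULL;
        }
}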
1344
1345 /**
1346  * qla2x00_error_entry() - Process an error entry.
1347  * @vha: SCSI driver HA context
1348  * @pkt: Entry pointer
1349  */
1350 static void
1351 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1352 {
1353         srb_t *sp;
1354         struct qla_hw_data *ha = vha->hw;
1355         struct req_que *req = rsp->req;
1356 #if defined(QL_DEBUG_LEVEL_2)
1357         if (pkt->entry_status & RF_INV_E_ORDER)
1358                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1359         else if (pkt->entry_status & RF_INV_E_COUNT)
1360                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1361         else if (pkt->entry_status & RF_INV_E_PARAM)
1362                 qla_printk(KERN_ERR, ha,
1363                     "%s: Invalid Entry Parameter\n", __func__);
1364         else if (pkt->entry_status & RF_INV_E_TYPE)
1365                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1366         else if (pkt->entry_status & RF_BUSY)
1367                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1368         else
1369                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1370 #endif
1371
1372         /* Validate handle. */
1373         if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1374                 sp = req->outstanding_cmds[pkt->handle];
1375         else
1376                 sp = NULL;
1377
1378         if (sp) {
1379                 /* Free outstanding command slot. */
1380                 req->outstanding_cmds[pkt->handle] = NULL;
1381
1382                 /* Bad payload or header */
1383                 if (pkt->entry_status &
1384                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1385                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1386                         sp->cmd->result = DID_ERROR << 16;
1387                 } else if (pkt->entry_status & RF_BUSY) {
1388                         sp->cmd->result = DID_BUS_BUSY << 16;
1389                 } else {
1390                         sp->cmd->result = DID_ERROR << 16;
1391                 }
1392                 qla2x00_sp_compl(ha, sp);
1393
1394         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1395             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1396                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1397                     vha->host_no));
1398                 qla_printk(KERN_WARNING, ha,
1399                     "Error entry - invalid handle\n");
1400
1401                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1402                 qla2xxx_wake_dpc(vha);
1403         }
1404 }
1405
1406 /**
1407  * qla24xx_mbx_completion() - Process mailbox command completions.
1408  * @vha: SCSI driver HA context
1409  * @mb0: Mailbox0 register
1410  */
1411 static void
1412 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1413 {
1414         uint16_t        cnt;
1415         uint16_t __iomem *wptr;
1416         struct qla_hw_data *ha = vha->hw;
1417         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1418
1419         /* Load return mailbox registers. */
1420         ha->flags.mbox_int = 1;
1421         ha->mailbox_out[0] = mb0;
1422         wptr = (uint16_t __iomem *)&reg->mailbox1;
1423
1424         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1425                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1426                 wptr++;
1427         }
1428
1429         if (ha->mcp) {
1430                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1431                     __func__, vha->host_no, ha->mcp->mb[0]));
1432         } else {
1433                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1434                     __func__, vha->host_no));
1435         }
1436 }
1437
1438 /**
1439  * qla24xx_process_response_queue() - Process response queue entries.
1440  * @rsp: response queue
1441  */
1442 void
1443 qla24xx_process_response_queue(struct rsp_que *rsp)
1444 {
1445         struct qla_hw_data *ha = rsp->hw;
1446         struct sts_entry_24xx *pkt;
1447         struct scsi_qla_host *vha;
1448
1449         vha = qla2x00_get_rsp_host(rsp);
1450
1451         if (!vha->flags.online)
1452                 return;
1453
1454         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1455                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1456
1457                 rsp->ring_index++;
1458                 if (rsp->ring_index == rsp->length) {
1459                         rsp->ring_index = 0;
1460                         rsp->ring_ptr = rsp->ring;
1461                 } else {
1462                         rsp->ring_ptr++;
1463                 }
1464
1465                 if (pkt->entry_status != 0) {
1466                         DEBUG3(printk(KERN_INFO
1467                             "scsi(%ld): Process error entry.\n", vha->host_no));
1468
1469                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1470                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1471                         wmb();
1472                         continue;
1473                 }
1474
1475                 switch (pkt->entry_type) {
1476                 case STATUS_TYPE:
1477                         qla2x00_status_entry(vha, rsp, pkt);
1478                         break;
1479                 case STATUS_CONT_TYPE:
1480                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1481                         break;
1482                 case VP_RPT_ID_IOCB_TYPE:
1483                         qla24xx_report_id_acquisition(vha,
1484                             (struct vp_rpt_id_entry_24xx *)pkt);
1485                         break;
1486                 default:
1487                         /* Type Not Supported. */
1488                         DEBUG4(printk(KERN_WARNING
1489                             "scsi(%ld): Received unknown response pkt type %x "
1490                             "entry status=%x.\n",
1491                             vha->host_no, pkt->entry_type, pkt->entry_status));
1492                         break;
1493                 }
1494                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1495                 wmb();
1496         }
1497
1498         /* Adjust ring index */
1499         ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1500 }
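
/*
 * Editor's note: an illustrative helper, not driver code, distilling the
 * consumer side of the response-ring protocol used in the loop above:
 * advance the software index with wrap-around and stamp the consumed entry
 * as RESPONSE_PROCESSED so a later polling pass skips it.  The new
 * out-index is published to the chip separately, once per batch, via
 * ha->isp_ops->wrt_rsp_reg().  The helper name is hypothetical.
 */
static inline void
qla2xxx_consume_rsp_entry_sketch(struct rsp_que *rsp, response_t *pkt)
{
        /* Wrap the software consumer index at the end of the ring. */
        if (++rsp->ring_index == rsp->length) {
                rsp->ring_index = 0;
                rsp->ring_ptr = rsp->ring;
        } else {
                rsp->ring_ptr++;
        }

        /*
         * Mark the entry consumed; wmb() orders this store ahead of the
         * out-pointer write that eventually hands the slot back to firmware.
         */
        pkt->signature = RESPONSE_PROCESSED;
        wmb();
}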
1501
1502 static void
1503 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1504 {
1505         int rval;
1506         uint32_t cnt;
1507         struct qla_hw_data *ha = vha->hw;
1508         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1509
1510         if (!IS_QLA25XX(ha))
1511                 return;
1512
1513         rval = QLA_SUCCESS;
1514         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1515         RD_REG_DWORD(&reg->iobase_addr);
1516         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1517         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1518             rval == QLA_SUCCESS; cnt--) {
1519                 if (cnt) {
1520                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1521                         udelay(10);
1522                 } else
1523                         rval = QLA_FUNCTION_TIMEOUT;
1524         }
1525         if (rval == QLA_SUCCESS)
1526                 goto next_test;
1527
1528         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1529         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1530             rval == QLA_SUCCESS; cnt--) {
1531                 if (cnt) {
1532                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1533                         udelay(10);
1534                 } else
1535                         rval = QLA_FUNCTION_TIMEOUT;
1536         }
1537         if (rval != QLA_SUCCESS)
1538                 goto done;
1539
1540 next_test:
1541         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1542                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1543
1544 done:
1545         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1546         RD_REG_DWORD(&reg->iobase_window);
1547 }
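
/*
 * Editor's note: an illustrative helper, not driver code, capturing the
 * bounded-polling pattern used twice in qla2xxx_check_risc_status() above:
 * select an iobase window and poll, with a retry budget, until the chip
 * acknowledges by setting BIT_0.  The helper name and its parameters are
 * hypothetical.
 */
static int
qla2xxx_poll_iobase_window_sketch(struct device_reg_24xx __iomem *reg,
    uint32_t window, uint32_t retries)
{
        WRT_REG_DWORD(&reg->iobase_window, window);
        while ((RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0) {
                if (!retries--)
                        return QLA_FUNCTION_TIMEOUT;
                /* Re-arm the window select and give the chip time to respond. */
                WRT_REG_DWORD(&reg->iobase_window, window);
                udelay(10);
        }
        return QLA_SUCCESS;
}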
1548
1549 /**
1550  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and ISP25xx.
1551  * @irq: interrupt number
1552  * @dev_id: response queue pointer
1553  *
1554  * Called by system whenever the host adapter generates an interrupt.
1555  *
1556  * Returns handled flag.
1557  */
1558 irqreturn_t
1559 qla24xx_intr_handler(int irq, void *dev_id)
1560 {
1561         scsi_qla_host_t *vha;
1562         struct qla_hw_data *ha;
1563         struct device_reg_24xx __iomem *reg;
1564         int             status;
1565         unsigned long   iter;
1566         uint32_t        stat;
1567         uint32_t        hccr;
1568         uint16_t        mb[4];
1569         struct rsp_que *rsp;
1570
1571         rsp = (struct rsp_que *) dev_id;
1572         if (!rsp) {
1573                 printk(KERN_INFO
1574                     "%s(): NULL response queue pointer\n", __func__);
1575                 return IRQ_NONE;
1576         }
1577
1578         ha = rsp->hw;
1579         reg = &ha->iobase->isp24;
1580         status = 0;
1581
1582         spin_lock(&ha->hardware_lock);
1583         vha = qla2x00_get_rsp_host(rsp);
1584         for (iter = 50; iter--; ) {
1585                 stat = RD_REG_DWORD(&reg->host_status);
1586                 if (stat & HSRX_RISC_PAUSED) {
1587                         if (pci_channel_offline(ha->pdev))
1588                                 break;
1589
1590                         hccr = RD_REG_DWORD(&reg->hccr);
1591
1592                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1593                             "Dumping firmware!\n", hccr);
1594
1595                         qla2xxx_check_risc_status(vha);
1596
1597                         ha->isp_ops->fw_dump(vha, 1);
1598                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1599                         break;
1600                 } else if ((stat & HSRX_RISC_INT) == 0)
1601                         break;
1602
1603                 switch (stat & 0xff) {
1604                 case 0x1:
1605                 case 0x2:
1606                 case 0x10:
1607                 case 0x11:
1608                         qla24xx_mbx_completion(vha, MSW(stat));
1609                         status |= MBX_INTERRUPT;
1610
1611                         break;
1612                 case 0x12:
1613                         mb[0] = MSW(stat);
1614                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1615                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1616                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1617                         qla2x00_async_event(vha, rsp, mb);
1618                         break;
1619                 case 0x13:
1620                 case 0x14:
1621                         qla24xx_process_response_queue(rsp);
1622                         break;
1623                 default:
1624                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1625                             "(%d).\n",
1626                             vha->host_no, stat & 0xff));
1627                         break;
1628                 }
1629                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1630                 RD_REG_DWORD_RELAXED(&reg->hccr);
1631         }
1632         spin_unlock(&ha->hardware_lock);
1633
1634         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1635             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1636                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1637                 complete(&ha->mbx_intr_comp);
1638         }
1639
1640         return IRQ_HANDLED;
1641 }
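
/*
 * Editor's note: a simplified, hypothetical sketch of the waiting side of
 * the mailbox handshake completed above (MBX_INTR_WAIT is armed by the
 * caller; the ISR sets MBX_INTERRUPT and signals ha->mbx_intr_comp).  This
 * is not the driver's actual qla2x00_mailbox_command(); the helper name,
 * the 'out'/'count' parameters and the fixed 30-second timeout are
 * assumptions for illustration only.
 */
static int
qla2xxx_mbx_wait_sketch(struct qla_hw_data *ha, uint16_t *out, int count)
{
        int i;

        /* Tell the interrupt path that a caller is waiting ... */
        set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

        /* ... the mailbox command would be issued here (omitted) ... */

        /* ... then sleep until qla24xx_mbx_completion()/the ISR signals us. */
        if (!wait_for_completion_timeout(&ha->mbx_intr_comp, 30 * HZ)) {
                clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
                return QLA_FUNCTION_TIMEOUT;
        }

        clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
        if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
                return QLA_FUNCTION_TIMEOUT;

        /* Registers were latched into mailbox_out[] by the completion path. */
        for (i = 0; i < count && i < ha->mbx_count; i++)
                out[i] = ha->mailbox_out[i];

        return QLA_SUCCESS;
}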
1642
1643 static irqreturn_t
1644 qla24xx_msix_rsp_q(int irq, void *dev_id)
1645 {
1646         struct qla_hw_data *ha;
1647         struct rsp_que *rsp;
1648         struct device_reg_24xx __iomem *reg;
1649
1650         rsp = (struct rsp_que *) dev_id;
1651         if (!rsp) {
1652                 printk(KERN_INFO
1653                     "%s(): NULL response queue pointer\n", __func__);
1654                 return IRQ_NONE;
1655         }
1656         ha = rsp->hw;
1657         reg = &ha->iobase->isp24;
1658
1659         spin_lock_irq(&ha->hardware_lock);
1660
1661         qla24xx_process_response_queue(rsp);
1662         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1663
1664         spin_unlock_irq(&ha->hardware_lock);
1665
1666         return IRQ_HANDLED;
1667 }
1668
1669 static irqreturn_t
1670 qla25xx_msix_rsp_q(int irq, void *dev_id)
1671 {
1672         struct qla_hw_data *ha;
1673         struct rsp_que *rsp;
1674         struct device_reg_24xx __iomem *reg;
1675         uint16_t msix_disabled_hccr = 0;
1676
1677         rsp = (struct rsp_que *) dev_id;
1678         if (!rsp) {
1679                 printk(KERN_INFO
1680                         "%s(): NULL response queue pointer\n", __func__);
1681                 return IRQ_NONE;
1682         }
1683         ha = rsp->hw;
1684         reg = &ha->iobase->isp24;
1685
1686         spin_lock_irq(&ha->hardware_lock);
1687
1688         msix_disabled_hccr = rsp->options;
1689         if (!rsp->id)
1690                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1691         else
1692                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1693
1694         qla24xx_process_response_queue(rsp);
1695
1696         if (!msix_disabled_hccr)
1697                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1698
1699         spin_unlock_irq(&ha->hardware_lock);
1700
1701         return IRQ_HANDLED;
1702 }
1703
1704 static irqreturn_t
1705 qla24xx_msix_default(int irq, void *dev_id)
1706 {
1707         scsi_qla_host_t *vha;
1708         struct qla_hw_data *ha;
1709         struct rsp_que *rsp;
1710         struct device_reg_24xx __iomem *reg;
1711         int             status;
1712         uint32_t        stat;
1713         uint32_t        hccr;
1714         uint16_t        mb[4];
1715
1716         rsp = (struct rsp_que *) dev_id;
1717         if (!rsp) {
1718                 DEBUG(printk(
1719                     "%s(): NULL response queue pointer\n", __func__));
1720                 return IRQ_NONE;
1721         }
1722         ha = rsp->hw;
1723         reg = &ha->iobase->isp24;
1724         status = 0;
1725
1726         spin_lock_irq(&ha->hardware_lock);
1727         vha = qla2x00_get_rsp_host(rsp);
1728         do {
1729                 stat = RD_REG_DWORD(&reg->host_status);
1730                 if (stat & HSRX_RISC_PAUSED) {
1731                         if (pci_channel_offline(ha->pdev))
1732                                 break;
1733
1734                         hccr = RD_REG_DWORD(&reg->hccr);
1735
1736                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1737                             "Dumping firmware!\n", hccr);
1738
1739                         qla2xxx_check_risc_status(vha);
1740
1741                         ha->isp_ops->fw_dump(vha, 1);
1742                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1743                         break;
1744                 } else if ((stat & HSRX_RISC_INT) == 0)
1745                         break;
1746
1747                 switch (stat & 0xff) {
1748                 case 0x1:
1749                 case 0x2:
1750                 case 0x10:
1751                 case 0x11:
1752                         qla24xx_mbx_completion(vha, MSW(stat));
1753                         status |= MBX_INTERRUPT;
1754
1755                         break;
1756                 case 0x12:
1757                         mb[0] = MSW(stat);
1758                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1759                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1760                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1761                         qla2x00_async_event(vha, rsp, mb);
1762                         break;
1763                 case 0x13:
1764                 case 0x14:
1765                         qla24xx_process_response_queue(rsp);
1766                         break;
1767                 default:
1768                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1769                             "(%d).\n",
1770                             vha->host_no, stat & 0xff));
1771                         break;
1772                 }
1773                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1774         } while (0);
1775         spin_unlock_irq(&ha->hardware_lock);
1776
1777         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1778             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1779                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1780                 complete(&ha->mbx_intr_comp);
1781         }
1782
1783         return IRQ_HANDLED;
1784 }
1785
1786 /* Interrupt handling helpers. */
1787
1788 struct qla_init_msix_entry {
1789         uint16_t entry;
1790         uint16_t index;
1791         const char *name;
1792         irq_handler_t handler;
1793 };
1794
1795 static struct qla_init_msix_entry base_queue = {
1796         .entry = 0,
1797         .index = 0,
1798         .name = "qla2xxx (default)",
1799         .handler = qla24xx_msix_default,
1800 };
1801
1802 static struct qla_init_msix_entry base_rsp_queue = {
1803         .entry = 1,
1804         .index = 1,
1805         .name = "qla2xxx (rsp_q)",
1806         .handler = qla24xx_msix_rsp_q,
1807 };
1808
1809 static struct qla_init_msix_entry multi_rsp_queue = {
1810         .entry = 1,
1811         .index = 1,
1812         .name = "qla2xxx (multi_q)",
1813         .handler = qla25xx_msix_rsp_q,
1814 };
1815
1816 static void
1817 qla24xx_disable_msix(struct qla_hw_data *ha)
1818 {
1819         int i;
1820         struct qla_msix_entry *qentry;
1821
1822         for (i = 0; i < ha->msix_count; i++) {
1823                 qentry = &ha->msix_entries[i];
1824                 if (qentry->have_irq)
1825                         free_irq(qentry->vector, qentry->rsp);
1826         }
1827         pci_disable_msix(ha->pdev);
1828         kfree(ha->msix_entries);
1829         ha->msix_entries = NULL;
1830         ha->flags.msix_enabled = 0;
1831 }
1832
1833 static int
1834 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1835 {
1836         int i, ret;
1837         struct msix_entry *entries;
1838         struct qla_msix_entry *qentry;
1839         struct qla_init_msix_entry *msix_queue;
1840
1841         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1842                                         GFP_KERNEL);
1843         if (!entries)
1844                 return -ENOMEM;
1845
1846         for (i = 0; i < ha->msix_count; i++)
1847                 entries[i].entry = i;
1848
1849         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1850         if (ret) {
1851                 qla_printk(KERN_WARNING, ha,
1852                         "MSI-X: Failed to enable support -- %d/%d\n"
1853                         "Retrying with %d vectors.\n", ha->msix_count, ret, ret);
1854                 ha->msix_count = ret;
1855                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1856                 if (ret) {
1857                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1858                                 " support, giving up -- %d/%d\n",
1859                                 ha->msix_count, ret);
1860                         goto msix_out;
1861                 }
1862                 ha->max_queues = ha->msix_count - 1;
1863         }
1864         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1865                                 ha->msix_count, GFP_KERNEL);
1866         if (!ha->msix_entries) {
1867                 ret = -ENOMEM;
1868                 goto msix_out;
1869         }
1870         ha->flags.msix_enabled = 1;
1871
1872         for (i = 0; i < ha->msix_count; i++) {
1873                 qentry = &ha->msix_entries[i];
1874                 qentry->vector = entries[i].vector;
1875                 qentry->entry = entries[i].entry;
1876                 qentry->have_irq = 0;
1877                 qentry->rsp = NULL;
1878         }
1879
1880         /* Enable MSI-X for AENs for queue 0 */
1881         qentry = &ha->msix_entries[0];
1882         ret = request_irq(qentry->vector, base_queue.handler, 0,
1883                                         base_queue.name, rsp);
1884         if (ret) {
1885                 qla_printk(KERN_WARNING, ha,
1886                         "MSI-X: Unable to register handler -- %x/%d.\n",
1887                         qentry->vector, ret);
1888                 qla24xx_disable_msix(ha);
1889                 goto msix_out;
1890         }
1891         qentry->have_irq = 1;
1892         qentry->rsp = rsp;
1893
1894         /* Enable MSI-X vector for response queue update for queue 0 */
1895         if (ha->max_queues > 1 && ha->mqiobase) {
1896                 ha->mqenable = 1;
1897                 msix_queue = &multi_rsp_queue;
1898                 qla_printk(KERN_INFO, ha,
1899                                 "MQ enabled, Number of Queue Resources: %d \n",
1900                                 ha->max_queues);
1901         } else {
1902                 ha->mqenable = 0;
1903                 msix_queue = &base_rsp_queue;
1904         }
1905
1906         qentry = &ha->msix_entries[1];
1907         ret = request_irq(qentry->vector, msix_queue->handler, 0,
1908                                                 msix_queue->name, rsp);
1909         if (ret) {
1910                 qla_printk(KERN_WARNING, ha,
1911                         "MSI-X: Unable to register handler -- %x/%d.\n",
1912                         qentry->vector, ret);
1913                 qla24xx_disable_msix(ha);
1914                 ha->mqenable = 0;
1915                 goto msix_out;
1916         }
1917         qentry->have_irq = 1;
1918         qentry->rsp = rsp;
1919
1920 msix_out:
1921         kfree(entries);
1922         return ret;
1923 }
1924
1925 int
1926 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1927 {
1928         int ret;
1929         device_reg_t __iomem *reg = ha->iobase;
1930
1931         /* If possible, enable MSI-X. */
1932         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1933                 goto skip_msix;
1934
1935         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1936                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1937                 DEBUG2(qla_printk(KERN_WARNING, ha,
1938                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1939                         ha->pdev->revision, ha->fw_attributes));
1940
1941                 goto skip_msix;
1942         }
1943
1944         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1945             (ha->pdev->subsystem_device == 0x7040 ||
1946                 ha->pdev->subsystem_device == 0x7041 ||
1947                 ha->pdev->subsystem_device == 0x1705)) {
1948                 DEBUG2(qla_printk(KERN_WARNING, ha,
1949                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1950                     ha->pdev->subsystem_vendor,
1951                     ha->pdev->subsystem_device));
1952
1953                 goto skip_msi;
1954         }
1955
1956         ret = qla24xx_enable_msix(ha, rsp);
1957         if (!ret) {
1958                 DEBUG2(qla_printk(KERN_INFO, ha,
1959                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1960                     ha->fw_attributes));
1961                 goto clear_risc_ints;
1962         }
1963         qla_printk(KERN_WARNING, ha,
1964             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
1965 skip_msix:
1966
1967         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1968                 goto skip_msi;
1969
1970         ret = pci_enable_msi(ha->pdev);
1971         if (!ret) {
1972                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1973                 ha->flags.msi_enabled = 1;
1974         }
1975 skip_msi:
1976
1977         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1978             IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1979         if (ret) {
1980                 qla_printk(KERN_WARNING, ha,
1981                     "Failed to reserve interrupt %d -- already in use.\n",
1982                     ha->pdev->irq);
1983                 goto fail;
1984         }
1985         ha->flags.inta_enabled = 1;
1986 clear_risc_ints:
1987
1988         spin_lock_irq(&ha->hardware_lock);
1989         if (IS_FWI2_CAPABLE(ha)) {
1990                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1991                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1992         } else {
1993                 WRT_REG_WORD(&reg->isp.semaphore, 0);
1994                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1995                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1996         }
1997         spin_unlock_irq(&ha->hardware_lock);
1998
1999 fail:
2000         return ret;
2001 }
2002
2003 void
2004 qla2x00_free_irqs(scsi_qla_host_t *vha)
2005 {
2006         struct qla_hw_data *ha = vha->hw;
2007         struct rsp_que *rsp = ha->rsp_q_map[0];
2008
2009         if (ha->flags.msix_enabled)
2010                 qla24xx_disable_msix(ha);
2011         else if (ha->flags.inta_enabled) {
2012                 free_irq(ha->pdev->irq, rsp);
2013                 pci_disable_msi(ha->pdev);
2014         }
2015 }
2016
2017 static struct scsi_qla_host *
2018 qla2x00_get_rsp_host(struct rsp_que *rsp)
2019 {
2020         srb_t *sp;
2021         struct qla_hw_data *ha = rsp->hw;
2022         struct scsi_qla_host *vha = NULL;
2023         struct sts_entry_24xx *pkt;
2024         struct req_que *req;
2025
2026         if (rsp->id) {
2027                 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2028                 req = rsp->req;
2029                 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2030                         sp = req->outstanding_cmds[pkt->handle];
2031                         if (sp)
2032                                 vha = sp->vha;
2033                 }
2034         }
2035         /* Handle it in the base queue. */
2036         if (!vha)
2037                 vha = pci_get_drvdata(ha->pdev);
2038
2039         return vha;
2040 }
2041
2042 int qla25xx_request_irq(struct rsp_que *rsp)
2043 {
2044         struct qla_hw_data *ha = rsp->hw;
2045         struct qla_init_msix_entry *intr = &multi_rsp_queue;
2046         struct qla_msix_entry *msix = rsp->msix;
2047         int ret;
2048
2049         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2050         if (ret) {
2051                 qla_printk(KERN_WARNING, ha,
2052                         "MSI-X: Unable to register handler -- %x/%d.\n",
2053                         msix->vector, ret);
2054                 return ret;
2055         }
2056         msix->have_irq = 1;
2057         msix->rsp = rsp;
2058         return ret;
2059 }
2060
2061 void
2062 qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2063 {
2064         device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2065         WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2066 }
2067
2068 void
2069 qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2070 {
2071         device_reg_t __iomem *reg = (void *) ha->iobase;
2072         WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2073 }
2074