/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);

static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock(&ha->hardware_lock);
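	/*
	 * Service at most 50 interrupt conditions per invocation so a stuck
	 * interrupt source cannot keep us in the handler indefinitely.
	 */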
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
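			/*
			 * Mailbox 0 values 0x4000-0x7fff indicate a mailbox
			 * command completion status; values 0x8000-0xbfff
			 * indicate an asynchronous event.
			 */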
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock(&ha->hardware_lock);
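	/*
	 * If a mailbox command is waiting for this interrupt, wake up the
	 * thread sleeping on ha->mbx_intr_comp.
	 */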
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock(&ha->hardware_lock);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha, "Parity error -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);
			else
				qla_printk(KERN_INFO, ha, "RISC paused -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock(&ha->hardware_lock);
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
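	/*
	 * Mailboxes 4 and 5 are read through qla2x00_debounce_register(),
	 * which re-reads the register until two consecutive reads match.
	 * On ISP2200 parts, mailbox register 8 lives at a different offset,
	 * so the walking pointer is re-based when it reaches that register.
	 */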
	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] =
			    qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char	*link_speeds[5] = { "1", "2", "?", "4", "8" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt;
	uint32_t	handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;
	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
		ha->isp_ops->fw_dump(ha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;
	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
		break;
	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
		break;
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		ha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && ha->vp_idx != mb[3])
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}
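		/*
		 * Queue this RSCN in the driver's circular rscn_queue.  If
		 * the queue is full, flag rscn_queue_overflow so the RSCN
		 * processing code knows that entries were dropped.
		 */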
		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));
		break;
	}

	if (!ha->parent && ha->num_vhosts)
		qla2x00_alert_all_vps(ha, mb);
}
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}
static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}
static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
{
	fc_port_t *fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= ha->max_q_depth)
		return;

	fcport = sp->fcport;
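	/*
	 * Only ramp the queue depth back up if at least ql2xqfullrampup
	 * seconds have passed since the last ramp-up and since the last
	 * QUEUE FULL condition on this port.
	 */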
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;

	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
{
	srb_t *sp;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    ha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	sp = ha->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[index] = NULL;

		CMD_COMPL_STATUS(sp->cmd) = 0L;
		CMD_SCSI_STATUS(sp->cmd) = 0L;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(ha, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
	}
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;
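		/*
		 * Advance the response ring pointer, wrapping back to the
		 * start of the ring when the last entry is reached.
		 */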
		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
{
	struct scsi_cmnd *cp = sp->cmd;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	CMD_ACTUAL_SNSLEN(cp) = sense_len;
	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > 32)
		sense_len = 32;

	memcpy(cp->sense_buffer, sense_data, sense_len);
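	/*
	 * If more sense data remains than fit in this status IOCB, remember
	 * the srb so the remainder can be copied out of the Status
	 * Continuation entries that follow.
	 */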
	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		sp->ha->status_srb = sp;

	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
	    "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
	    cp->device->id, cp->device->lun, cp, cp->serial_number));
	if (sense_len)
		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
		    CMD_ACTUAL_SNSLEN(cp)));
}
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t	sense_len, rsp_info_len, resid_len, fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(ha, sts->handle);

		return;
	}
	/* Validate handle. */
	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
		sp = ha->outstanding_cmds[sts->handle];
		ha->outstanding_cmds[sts->handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
		qla_printk(KERN_WARNING, ha,
		    "Command is NULL: already returned to OS (sp=%p)\n", sp);

		return;
	}
	lscsi_status = scsi_status & STATUS_MASK;
	CMD_ENTRY_STATUS(cp) = sts->entry_status;
	CMD_COMPL_STATUS(cp) = comp_status;
	CMD_SCSI_STATUS(cp) = scsi_status;

	fcport = sp->fcport;
	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		sense_len = le32_to_cpu(sts24->sense_len);
		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		resid_len = le32_to_cpu(sts24->rsp_residual_count);
		fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
	} else {
		sense_len = le16_to_cpu(sts->req_sense_length);
		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
	}
	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha))
			sense_data += rsp_info_len;
		if (rsp_info_len > 3 && rsp_info[3]) {
			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
			    "retrying command\n", ha->host_no,
			    cp->device->channel, cp->device->id,
			    cp->device->lun, rsp_info_len, rsp_info[0],
			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
			    rsp_info[5], rsp_info[6], rsp_info[7]));

			cp->result = DID_BUS_BUSY << 16;
			qla2x00_sp_compl(ha, sp);
			return;
		}
	}
	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): QUEUE FULL status detected "
			    "0x%x-0x%x.\n", ha->host_no, comp_status,
			    scsi_status));

			/* Adjust queue depth for all luns on the port. */
			fcport->last_queue_full = jiffies;
			starget_for_each_device(cp->device->sdev_target,
			    fcport, qla2x00_adjust_sdev_qdepth_down);
			break;
		}
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, sense_len);
		break;
	case CS_DATA_UNDERRUN:
		resid = resid_len;
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha)) {
			if (scsi_status & SS_RESIDUAL_UNDER &&
			    resid != fw_resid_len) {
				scsi_status &= ~SS_RESIDUAL_UNDER;
			}

			resid = fw_resid_len;
		}

		if (scsi_status & SS_RESIDUAL_UNDER) {
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d) UNDERRUN status detected "
			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
			    "os_underflow=0x%x\n", ha->host_no,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status, resid_len, resid, cp->cmnd[0],
			    cp->underflow));
		}
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			cp->result = DID_OK << 16 | lscsi_status;

			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				DEBUG2(printk(KERN_INFO
				    "scsi(%ld): QUEUE FULL status detected "
				    "0x%x-0x%x.\n", ha->host_no, comp_status,
				    scsi_status));

				/*
				 * Adjust queue depth for all luns on the
				 * port.
				 */
				fcport->last_queue_full = jiffies;
				starget_for_each_device(
				    cp->device->sdev_target, fcport,
				    qla2x00_adjust_sdev_qdepth_down);
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, sense_len);

			/*
			 * In case of a Underrun condition, set both the lscsi
			 * status and the completion status to appropriate
			 * values.
			 */
			if (((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				DEBUG2(qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_ERROR << 16 | lscsi_status;
			}
		} else {
			/*
			 * If RISC reports underrun and target does not report
			 * it then we must have a lost frame, so tell upper
			 * layer to retry it by reporting a bus busy.
			 */
			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
				    "frame(s) detected (%x of %x bytes)..."
				    "retrying command.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_BUS_BUSY << 16;
				break;
			}

			/* Handle mid-layer underflow */
			if ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}

			/* Everybody online, looking good... */
			cp->result = DID_OK << 16;
		}
		break;
	case CS_DATA_OVERRUN:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
		    scsi_status));
		DEBUG2(printk(KERN_INFO
		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5]));
		DEBUG2(printk(KERN_INFO
		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
		    "status!\n",
		    cp->serial_number, scsi_bufflen(cp), resid_len));

		cp->result = DID_ERROR << 16;
		break;
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
		/*
		 * If the port is in Target Down state, return all IOs for this
		 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
		 * retry_queue.
		 */
		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun,
		    cp->serial_number, comp_status,
		    atomic_read(&fcport->state)));

		cp->result = DID_BUS_BUSY << 16;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		}
		break;
	case CS_RESET:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_ABORTED:
		/*
		 * hv2.19.12 - DID_ABORT does not retry the request if we
		 * aborted this request then abort otherwise it must be a
		 * reset.
		 */
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;
	case CS_TIMEOUT:
		cp->result = DID_BUS_BUSY << 16;

		if (IS_FWI2_CAPABLE(ha)) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status));
			break;
		}
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
		    "sflags=%x.\n", ha->host_no, cp->device->channel,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    le16_to_cpu(sts->status_flags)));

		/* Check to see if logout occurred. */
		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		break;
	default:
		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
		qla_printk(KERN_INFO, ha,
		    "Unknown status detected 0x%x-0x%x.\n",
		    comp_status, scsi_status);

		cp->result = DID_ERROR << 16;
		break;
	}

	/* Place command on done queue. */
	if (ha->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
{
	uint8_t		sense_sz = 0;
	srb_t		*sp = ha->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned back to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			ha->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			ha->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
{
	srb_t *sp;

#if defined(QL_DEBUG_LEVEL_2)
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif
	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[pkt->handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
	}
}
/**
 * qla2x00_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle1));
	/* Validate handle. */
	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle1];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle1] = NULL;

	qla2x00_sp_compl(ha, sp);
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct sts_entry_24xx *pkt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(ha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
}
static void
qla2xxx_check_risc_status(scsi_qla_host_t *ha)
{
	int rval;
	uint32_t cnt;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha))
		return;

	rval = QLA_SUCCESS;
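	/*
	 * Select register window 0x7C00 and wait for BIT_0 of the window
	 * register to be set before sampling the RISC status.
	 */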
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx and ISP25xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return IRQ_NONE;
	}

	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock(&ha->hardware_lock);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			if (ha->hw_event_pause_errors == 0)
				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
				    0, MSW(stat), LSW(stat));
			else if (ha->hw_event_pause_errors < 0xffffffff)
				ha->hw_event_pause_errors++;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(ha);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock(&ha->hardware_lock);
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}
/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));
	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_24xx __iomem *reg;

	ha = dev_id;
	reg = &ha->iobase->isp24;

	spin_lock(&ha->hardware_lock);

	qla24xx_process_response_queue(ha);
	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);

	spin_unlock(&ha->hardware_lock);

	return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];

	ha = dev_id;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock(&ha->hardware_lock);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			if (ha->hw_event_pause_errors == 0)
				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
				    0, MSW(stat), LSW(stat));
			else if (ha->hw_event_pause_errors < 0xffffffff)
				ha->hw_event_pause_errors++;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(ha);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock(&ha->hardware_lock);
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
		"qla2xxx (default)", qla24xx_msix_default },

	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
};
static void
qla24xx_disable_msix(scsi_qla_host_t *ha)
{
	int i;
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		if (qentry->have_irq)
			free_irq(qentry->msix_vector, ha);
	}
	pci_disable_msix(ha->pdev);
}
static int
qla24xx_enable_msix(scsi_qla_host_t *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct qla_msix_entry *qentry;
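	/*
	 * One vector is requested per imsix_entries[] slot: the default
	 * vector (mailbox completions and asynchronous events) and the
	 * response-queue vector.
	 */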
	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = imsix_entries[i].entry;

	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
		if (ret) {
			qla_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    imsix_entries[i].index, ret);
			qla24xx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
	}

msix_out:
	return ret;
}
int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
		goto skip_msix;

	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes));

		goto skip_msix;
	}

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	    ha->pdev->subsystem_device == 0x7041 ||
	    ha->pdev->subsystem_device == 0x1705)) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device));

		goto skip_msi;
	}
	ret = qla24xx_enable_msix(ha);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
		    ha->fw_attributes));
		goto clear_risc_ints;
	}
	qla_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
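	/* MSI-X is unavailable; fall back to MSI, then to legacy INTx. */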
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
		ha->flags.msi_enabled = 1;
	}
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}
	ha->flags.inta_enabled = 1;
	ha->host->irq = ha->pdev->irq;
clear_risc_ints:
	ha->isp_ops->disable_intrs(ha);
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);
	ha->isp_ops->enable_intrs(ha);

fail:
	return ret;
}
void
qla2x00_free_irqs(scsi_qla_host_t *ha)
{
	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.inta_enabled) {
		free_irq(ha->host->irq, ha);
		pci_disable_msi(ha->pdev);
	}
}