/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
					      uint32_t index)
{
	struct srb *srb;

	srb = qla4xxx_del_from_active_array(ha, index);
	if (srb) {
		/* Save ISP completion status */
		srb->cmd->result = DID_OK << 16;
		qla4xxx_srb_compl(ha, srb);
	} else {
		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
			      "%d\n", ha->host_no, index));
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}
}
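
/*
 * Note: the fast-post path above applies only to commands that completed
 * with SCS_COMPLETE and a zero SCSI status, i.e. with no sense data or
 * residual to propagate; every other completion is decoded in
 * qla4xxx_status_entry() below.
 */
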
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;

	if (sts_entry->completionStatus == SCS_COMPLETE &&
	    sts_entry->scsiStatus == 0) {
		qla4xxx_process_completed_request(ha,
						  le32_to_cpu(sts_entry->
							      handle));
		return;
	}

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);
	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:
		if (scsi_status == 0) {
			cmd->result = DID_OK << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
			    cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun0, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}
		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;

		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));

		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));

		srb->flags |= SRB_GOT_SENSE;
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_BUS_BUSY << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;

			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min_t(uint16_t, sensebytecnt,
				     SCSI_SENSE_BUFFERSIZE));

			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun1, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_BUS_BUSY << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

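		/*
		 * Each response-queue element is a fixed-size IOCB; the
		 * entry type in its header selects how the entry is handled
		 * below.
		 */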
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;
		case ET_PASSTHRU_STATUS:
			break;
		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;
		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */
			srb = qla4xxx_del_from_active_array(ha,
						le32_to_cpu(sts_entry->
							    handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;
		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;
		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue \n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also clears
	 * the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
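
/*
 * Mailbox/AEN handling is split in two: qla4xxx_isr_decode_mailbox() below
 * runs under hardware_lock in interrupt context and either records mailbox
 * command completions or queues database-changed AENs, while
 * qla4xxx_process_aen() later drains that queue from the DPC thread.
 */
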
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_stat2, mbox_stat3;

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					readl(&ha->reg->mailbox[i]);
			ha->aen_log.count++;
		}

		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s:Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							      mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

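		/*
		 * For the IP address state change AEN below, mailbox
		 * registers 2 and 3 carry firmware-defined IP address state
		 * codes (the raw values are used directly here).  Judging by
		 * the actions taken, the (3, 5) combination is treated as a
		 * newly acquired DHCP address, while (5, 2) is treated as a
		 * lost address that requires adapter recovery.
		 */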
		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			mbox_stat2 = readl(&ha->reg->mailbox[2]);
			mbox_stat3 = readl(&ha->reg->mailbox[3]);

			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;

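		/*
		 * Database-changed AENs are not acted on here; they are
		 * copied into the ha->aen_q ring (aen_q_count tracks free
		 * slots, aen_in is the producer index) and DPC_AEN is set so
		 * that the DPC thread can process them later via
		 * qla4xxx_process_aen().
		 */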
		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {
				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);

				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].mbox_sts[4]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed! AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));
		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: interrupt status to process.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/* Read interrupt status */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

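		/*
		 * The conditions below are checked in priority order: a fatal
		 * firmware error first, then a firmware-initiated SCSI reset,
		 * and only then normal pending work (response-queue
		 * completions and mailbox events) via
		 * qla4xxx_interrupt_service_routine().
		 */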
		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	FLUSH_DDB_CHANGED_AENS		1
 *	RELOGIN_DDB_CHANGED_AENS	2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		/* advance consumer index and return the slot to the pool */
		ha->aen_q_count++;
		ha->aen_out++;
		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out - 1) : (MAX_AEN_ENTRIES - 1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

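		/*
		 * Note that hardware_lock was dropped above, so the handlers
		 * below run without it; the lock is re-acquired at the bottom
		 * of the loop before the queue indices are examined again.
		 */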
		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				/* FIXME: name length? */
				ddb_entry =
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait + 4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait + 4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}