2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2005 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
9 #include <linux/moduleparam.h>
10 #include <linux/vmalloc.h>
11 #include <linux/smp_lock.h>
12 #include <linux/delay.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsicam.h>
16 #include <scsi/scsi_transport.h>
17 #include <scsi/scsi_transport_fc.h>
22 char qla2x00_version_str[40];
25 * SRB allocation cache
27 static kmem_cache_t *srb_cachep;
30 * Ioctl related information.
34 int ql2xlogintimeout = 20;
35 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
36 MODULE_PARM_DESC(ql2xlogintimeout,
37 "Login timeout value in seconds.");
39 int qlport_down_retry = 30;
40 module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
41 MODULE_PARM_DESC(qlport_down_retry,
42 "Maximum number of command retries to a port that returns"
43 "a PORT-DOWN status.");
45 int ql2xplogiabsentdevice;
46 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
47 MODULE_PARM_DESC(ql2xplogiabsentdevice,
48 "Option to enable PLOGI to devices that are not present after "
49 "a Fabric scan. This is needed for several broken switches."
50 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
52 int ql2xloginretrycount = 0;
53 module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
54 MODULE_PARM_DESC(ql2xloginretrycount,
55 "Specify an alternate value for the NVRAM login retry count.");
58 module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
59 MODULE_PARM_DESC(ql2xfwloadbin,
60 "Load ISP2xxx firmware image via hotplug.");
62 static void qla2x00_free_device(scsi_qla_host_t *);
64 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
67 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
68 MODULE_PARM_DESC(ql2xfdmienable,
69 "Enables FDMI registratons "
70 "Default is 0 - no FDMI. 1 - perfom FDMI.");
73 * SCSI host template entry points
75 static int qla2xxx_slave_configure(struct scsi_device * device);
76 static int qla2xxx_slave_alloc(struct scsi_device *);
77 static void qla2xxx_slave_destroy(struct scsi_device *);
78 static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
79 void (*fn)(struct scsi_cmnd *));
80 static int qla24xx_queuecommand(struct scsi_cmnd *cmd,
81 void (*fn)(struct scsi_cmnd *));
82 static int qla2xxx_eh_abort(struct scsi_cmnd *);
83 static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
84 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
85 static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
86 static int qla2x00_loop_reset(scsi_qla_host_t *ha);
87 static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
89 static int qla2x00_change_queue_depth(struct scsi_device *, int);
90 static int qla2x00_change_queue_type(struct scsi_device *, int);
92 static struct scsi_host_template qla2x00_driver_template = {
93 .module = THIS_MODULE,
95 .queuecommand = qla2x00_queuecommand,
97 .eh_abort_handler = qla2xxx_eh_abort,
98 .eh_device_reset_handler = qla2xxx_eh_device_reset,
99 .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
100 .eh_host_reset_handler = qla2xxx_eh_host_reset,
102 .slave_configure = qla2xxx_slave_configure,
104 .slave_alloc = qla2xxx_slave_alloc,
105 .slave_destroy = qla2xxx_slave_destroy,
106 .change_queue_depth = qla2x00_change_queue_depth,
107 .change_queue_type = qla2x00_change_queue_type,
110 .use_clustering = ENABLE_CLUSTERING,
111 .sg_tablesize = SG_ALL,
114 * The RISC allows for each command to transfer (2^32-1) bytes of data,
115 * which equates to 0x800000 sectors.
117 .max_sectors = 0xFFFF,
118 .shost_attrs = qla2x00_host_attrs,
121 static struct scsi_host_template qla24xx_driver_template = {
122 .module = THIS_MODULE,
124 .queuecommand = qla24xx_queuecommand,
126 .eh_abort_handler = qla2xxx_eh_abort,
127 .eh_device_reset_handler = qla2xxx_eh_device_reset,
128 .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
129 .eh_host_reset_handler = qla2xxx_eh_host_reset,
131 .slave_configure = qla2xxx_slave_configure,
133 .slave_alloc = qla2xxx_slave_alloc,
134 .slave_destroy = qla2xxx_slave_destroy,
135 .change_queue_depth = qla2x00_change_queue_depth,
136 .change_queue_type = qla2x00_change_queue_type,
139 .use_clustering = ENABLE_CLUSTERING,
140 .sg_tablesize = SG_ALL,
142 .max_sectors = 0xFFFF,
143 .shost_attrs = qla2x00_host_attrs,
146 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
148 /* TODO Convert to inlines
152 #define WATCH_INTERVAL 1 /* number of seconds */
154 static void qla2x00_timer(scsi_qla_host_t *);
156 static __inline__ void qla2x00_start_timer(scsi_qla_host_t *,
157 void *, unsigned long);
158 static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
159 static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
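/*
 * Timer helpers: qla2x00_start_timer() arms the per-host timer to fire
 * every 'interval' seconds with the ha pointer as its argument,
 * qla2x00_restart_timer() re-arms it from the timer routine itself, and
 * qla2x00_stop_timer() tears it down synchronously.
 */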
162 qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
164 init_timer(&ha->timer);
165 ha->timer.expires = jiffies + interval * HZ;
166 ha->timer.data = (unsigned long)ha;
167 ha->timer.function = (void (*)(unsigned long))func;
168 add_timer(&ha->timer);
169 ha->timer_active = 1;
173 qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
175 mod_timer(&ha->timer, jiffies + interval * HZ);
178 static __inline__ void
179 qla2x00_stop_timer(scsi_qla_host_t *ha)
181 del_timer_sync(&ha->timer);
182 ha->timer_active = 0;
185 static int qla2x00_do_dpc(void *data);
187 static void qla2x00_rst_aen(scsi_qla_host_t *);
189 static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
190 static void qla2x00_mem_free(scsi_qla_host_t *ha);
191 static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
192 static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
193 static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
194 void qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *);
196 /* -------------------------------------------------------------------------- */
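/*
 * qla2x00_pci_info_str() - Build a human-readable PCI bus description
 * (bus mode in MHz) from the bus-mode bits latched in ha->pci_attr.
 */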
199 qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
201 static char *pci_bus_modes[] = {
202 "33", "66", "100", "133",
207 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
210 strcat(str, pci_bus_modes[pci_bus]);
212 pci_bus = (ha->pci_attr & BIT_8) >> 8;
214 strcat(str, pci_bus_modes[pci_bus]);
216 strcat(str, " MHz)");
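/*
 * qla24xx_pci_info_str() - As above for ISP24xx parts; if a PCI Express
 * capability is present, report link speed and width from the link status
 * register, otherwise fall back to the PCI-X bus-mode bits.
 */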
222 qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
224 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
228 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
231 uint16_t pcie_lstat, lspeed, lwidth;
234 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
235 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
236 lwidth = (pcie_lstat &
237 (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
239 strcpy(str, "PCIe (");
241 strcat(str, "2.5Gb/s ");
243 strcat(str, "<unknown> ");
244 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
251 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
252 if (pci_bus == 0 || pci_bus == 8) {
254 strcat(str, pci_bus_modes[pci_bus >> 3]);
258 strcat(str, "Mode 2");
260 strcat(str, "Mode 1");
262 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
264 strcat(str, " MHz)");
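/*
 * The *_fw_version_str() helpers format the running firmware version and
 * decoded attribute flags into a caller-supplied buffer (used, for
 * example, in the probe-time banner below).
 */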
270 qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
274 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
275 ha->fw_minor_version,
276 ha->fw_subminor_version);
278 if (ha->fw_attributes & BIT_9) {
283 switch (ha->fw_attributes & 0xFF) {
297 sprintf(un_str, "(%x)", ha->fw_attributes);
301 if (ha->fw_attributes & 0x100)
308 qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
310 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
311 ha->fw_minor_version,
312 ha->fw_subminor_version);
314 if (ha->fw_attributes & BIT_0)
315 strcat(str, "[Class 2] ");
316 if (ha->fw_attributes & BIT_1)
317 strcat(str, "[IP] ");
318 if (ha->fw_attributes & BIT_2)
319 strcat(str, "[Multi-ID] ");
320 if (ha->fw_attributes & BIT_13)
321 strcat(str, "[Experimental]");
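/*
 * qla2x00_get_new_sp() - Pull an SRB from the per-host mempool, take the
 * initial reference, and cross-link it with the SCSI command via CMD_SP()
 * so the completion path can find it again.
 */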
325 static inline srb_t *
326 qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
327 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
331 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
335 atomic_set(&sp->ref_count, 1);
340 CMD_SP(cmd) = (void *)sp;
341 cmd->scsi_done = done;
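/*
 * qla2x00_queuecommand() - Mid-layer fast path: validate the rport and
 * fcport state, drop the host lock, build an SRB and hand it to
 * qla2x00_start_scsi(); on failure the SRB is freed and the command is
 * either failed with DID_NO_CONNECT or requeued via
 * SCSI_MLQUEUE_HOST_BUSY.
 */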
347 qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
349 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
350 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
351 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
355 rval = fc_remote_port_chkready(rport);
358 goto qc_fail_command;
361 if (atomic_read(&fcport->state) != FCS_ONLINE) {
362 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
363 atomic_read(&ha->loop_state) == LOOP_DEAD) {
364 cmd->result = DID_NO_CONNECT << 16;
365 goto qc_fail_command;
370 spin_unlock_irq(ha->host->host_lock);
372 sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
374 goto qc_host_busy_lock;
376 rval = qla2x00_start_scsi(sp);
377 if (rval != QLA_SUCCESS)
378 goto qc_host_busy_free_sp;
380 spin_lock_irq(ha->host->host_lock);
384 qc_host_busy_free_sp:
385 qla2x00_sp_free_dma(ha, sp);
386 mempool_free(sp, ha->srb_mempool);
389 spin_lock_irq(ha->host->host_lock);
392 return SCSI_MLQUEUE_HOST_BUSY;
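/*
 * qla24xx_queuecommand() mirrors the flow above but posts the request
 * through the ISP24xx ring builder, qla24xx_start_scsi().
 */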
402 qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
404 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
405 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
406 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
410 rval = fc_remote_port_chkready(rport);
413 goto qc24_fail_command;
416 if (atomic_read(&fcport->state) != FCS_ONLINE) {
417 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
418 atomic_read(&ha->loop_state) == LOOP_DEAD) {
419 cmd->result = DID_NO_CONNECT << 16;
420 goto qc24_fail_command;
425 spin_unlock_irq(ha->host->host_lock);
427 sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
429 goto qc24_host_busy_lock;
431 rval = qla24xx_start_scsi(sp);
432 if (rval != QLA_SUCCESS)
433 goto qc24_host_busy_free_sp;
435 spin_lock_irq(ha->host->host_lock);
439 qc24_host_busy_free_sp:
440 qla2x00_sp_free_dma(ha, sp);
441 mempool_free(sp, ha->srb_mempool);
444 spin_lock_irq(ha->host->host_lock);
447 return SCSI_MLQUEUE_HOST_BUSY;
457 * qla2x00_eh_wait_on_command
458 * Waits for the command to be returned by the Firmware for some maximum time.
462 * ha = actual ha whose done queue will contain the command
463 * returned by firmware.
464 * cmd = Scsi Command to wait on.
465 * flag = Abort/Reset(Bus or Device Reset)
472 qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
474 #define ABORT_POLLING_PERIOD 1000
475 #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
476 unsigned long wait_iter = ABORT_WAIT_ITER;
477 int ret = QLA_SUCCESS;
479 while (CMD_SP(cmd)) {
480 msleep(ABORT_POLLING_PERIOD);
486 ret = QLA_FUNCTION_FAILED;
492 * qla2x00_wait_for_hba_online
493 * Wait until the HBA is online after going through
494 * <= MAX_RETRIES_OF_ISP_ABORT retries, or until
495 * the HBA is finally disabled, i.e. marked offline.
498 * ha - pointer to host adapter structure
501 * Does context switching - release any SPIN_LOCK
502 * held before calling this routine.
505 * Success (Adapter is online) : 0
506 * Failed (Adapter is offline/disabled) : 1
509 qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
512 unsigned long wait_online;
514 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
515 while (((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) ||
516 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
517 test_bit(ISP_ABORT_RETRY, &ha->dpc_flags) ||
518 ha->dpc_active) && time_before(jiffies, wait_online)) {
522 if (ha->flags.online)
523 return_status = QLA_SUCCESS;
525 return_status = QLA_FUNCTION_FAILED;
527 DEBUG2(printk("%s return_status=%d\n",__func__,return_status));
529 return (return_status);
533 * qla2x00_wait_for_loop_ready
534 * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
535 * to be in LOOP_READY state.
537 * ha - pointer to host adapter structure
540 * Does context switching - release any SPIN_LOCK
541 * held before calling this routine.
545 * Success (LOOP_READY) : 0
546 * Failed (LOOP_NOT_READY) : 1
549 qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
551 int return_status = QLA_SUCCESS;
552 unsigned long loop_timeout;
554 /* wait for 5 min at the max for loop to be ready */
555 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
557 while ((!atomic_read(&ha->loop_down_timer) &&
558 atomic_read(&ha->loop_state) == LOOP_DOWN) ||
559 atomic_read(&ha->loop_state) != LOOP_READY) {
561 if (time_after_eq(jiffies, loop_timeout)) {
562 return_status = QLA_FUNCTION_FAILED;
566 return (return_status);
569 /**************************************************************************
573 * The abort function will abort the specified command.
576 * cmd = Linux SCSI command packet to be aborted.
579 * Either SUCCESS or FAILED.
582 **************************************************************************/
584 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
586 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
589 unsigned int id, lun;
590 unsigned long serial;
598 id = cmd->device->id;
599 lun = cmd->device->lun;
600 serial = cmd->serial_number;
602 /* Check active list for the command. */
603 spin_lock_irqsave(&ha->hardware_lock, flags);
604 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
605 sp = ha->outstanding_cmds[i];
613 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld "
614 "sp->state=%x\n", __func__, ha->host_no, sp, serial,
616 DEBUG3(qla2x00_print_scsi_cmd(cmd);)
618 spin_unlock_irqrestore(&ha->hardware_lock, flags);
619 if (ha->isp_ops.abort_command(ha, sp)) {
620 DEBUG2(printk("%s(%ld): abort_command "
621 "mbx failed.\n", __func__, ha->host_no));
623 DEBUG3(printk("%s(%ld): abort_command "
624 "mbx success.\n", __func__, ha->host_no));
627 spin_lock_irqsave(&ha->hardware_lock, flags);
631 spin_unlock_irqrestore(&ha->hardware_lock, flags);
633 /* Wait for the command to be returned. */
634 if (ret == SUCCESS) {
635 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
636 qla_printk(KERN_ERR, ha,
637 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
638 "%x.\n", ha->host_no, id, lun, serial, ret);
642 qla_printk(KERN_INFO, ha,
643 "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
644 id, lun, serial, ret);
649 /**************************************************************************
650 * qla2x00_eh_wait_for_pending_target_commands
653 * Waits for all the commands to come back from the specified target.
656 * ha - pointer to scsi_qla_host structure.
659 * Either SUCCESS or FAILED.
662 **************************************************************************/
664 qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
669 struct scsi_cmnd *cmd;
675 * Waiting for all commands for the designated target in the active
678 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
679 spin_lock_irqsave(&ha->hardware_lock, flags);
680 sp = ha->outstanding_cmds[cnt];
683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
684 if (cmd->device->id == t) {
685 if (!qla2x00_eh_wait_on_command(ha, cmd)) {
691 spin_unlock_irqrestore(&ha->hardware_lock, flags);
698 /**************************************************************************
699 * qla2xxx_eh_device_reset
702 * The device reset function will reset the target and abort any
703 * executing commands.
705 * NOTE: The use of SP is undefined within this context. Do *NOT*
706 * attempt to use this value, even if you determine it is
710 * cmd = Linux SCSI command packet of the command that caused the
714 * SUCCESS/FAILURE (defined as macro in scsi.h).
716 **************************************************************************/
718 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
720 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
721 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
724 unsigned int id, lun;
725 unsigned long serial;
729 id = cmd->device->id;
730 lun = cmd->device->lun;
731 serial = cmd->serial_number;
733 sp = (srb_t *) CMD_SP(cmd);
737 qla_printk(KERN_INFO, ha,
738 "scsi(%ld:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, id, lun);
740 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
741 goto eh_dev_reset_done;
743 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
744 if (qla2x00_device_reset(ha, fcport) == 0)
747 #if defined(LOGOUT_AFTER_DEVICE_RESET)
748 if (ret == SUCCESS) {
749 if (fcport->flags & FC_FABRIC_DEVICE) {
750 ha->isp_ops.fabric_logout(ha, fcport->loop_id);
751 qla2x00_mark_device_lost(ha, fcport);
756 DEBUG2(printk(KERN_INFO
757 "%s failed: loop not ready\n",__func__);)
761 DEBUG3(printk("%s(%ld): device reset failed\n",
762 __func__, ha->host_no));
763 qla_printk(KERN_INFO, ha, "%s: device reset failed\n",
766 goto eh_dev_reset_done;
769 /* Flush outstanding commands. */
770 if (qla2x00_eh_wait_for_pending_target_commands(ha, id))
773 DEBUG3(printk("%s(%ld): failed while waiting for commands\n",
774 __func__, ha->host_no));
775 qla_printk(KERN_INFO, ha,
776 "%s: failed while waiting for commands\n", __func__);
778 qla_printk(KERN_INFO, ha,
779 "scsi(%ld:%d:%d): DEVICE RESET SUCCEEDED.\n", ha->host_no,
785 /**************************************************************************
786 * qla2x00_eh_wait_for_pending_commands
789 * Waits for all the commands to come back from the specified host.
792 * ha - pointer to scsi_qla_host structure.
799 **************************************************************************/
801 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
806 struct scsi_cmnd *cmd;
812 * Waiting for all outstanding commands on the adapter in the active
815 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
816 spin_lock_irqsave(&ha->hardware_lock, flags);
817 sp = ha->outstanding_cmds[cnt];
820 spin_unlock_irqrestore(&ha->hardware_lock, flags);
821 status = qla2x00_eh_wait_on_command(ha, cmd);
826 spin_unlock_irqrestore(&ha->hardware_lock, flags);
833 /**************************************************************************
834 * qla2xxx_eh_bus_reset
837 * The bus reset function will reset the bus and abort any executing
841 * cmd = Linux SCSI command packet of the command that caused the
845 * SUCCESS/FAILURE (defined as macro in scsi.h).
847 **************************************************************************/
849 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
851 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
852 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
855 unsigned int id, lun;
856 unsigned long serial;
860 id = cmd->device->id;
861 lun = cmd->device->lun;
862 serial = cmd->serial_number;
864 sp = (srb_t *) CMD_SP(cmd);
868 qla_printk(KERN_INFO, ha,
869 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun);
871 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
872 DEBUG2(printk("%s failed: board disabled\n", __func__));
873 goto eh_bus_reset_done;
876 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
877 if (qla2x00_loop_reset(ha) == QLA_SUCCESS)
881 goto eh_bus_reset_done;
883 /* Flush outstanding commands. */
884 if (!qla2x00_eh_wait_for_pending_commands(ha))
888 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
889 (ret == FAILED) ? "failed" : "succeeded");
894 /**************************************************************************
895 * qla2xxx_eh_host_reset
898 * The reset function will reset the Adapter.
901 * cmd = Linux SCSI command packet of the command that caused the
905 * Either SUCCESS or FAILED.
908 **************************************************************************/
910 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
912 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
913 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
916 unsigned int id, lun;
917 unsigned long serial;
921 id = cmd->device->id;
922 lun = cmd->device->lun;
923 serial = cmd->serial_number;
925 sp = (srb_t *) CMD_SP(cmd);
929 qla_printk(KERN_INFO, ha,
930 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun);
932 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
933 goto eh_host_reset_lock;
936 * FIXME: the dpc thread may be active and processing
937 * loop_resync, so wait a while for it to
938 * complete and then issue the big hammer. Otherwise
939 * it may cause an I/O failure, as the big hammer marks the
940 * devices as lost, kicking off the port_down_timer,
941 * while the dpc is stuck waiting for the mailbox to complete.
943 qla2x00_wait_for_loop_ready(ha);
944 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
945 if (qla2x00_abort_isp(ha)) {
946 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
947 /* failed. schedule dpc to try */
948 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
950 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
951 goto eh_host_reset_lock;
953 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
955 /* Waiting for our command in done_queue to be returned to OS.*/
956 if (qla2x00_eh_wait_for_pending_commands(ha))
960 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
961 (ret == FAILED) ? "failed" : "succeeded");
971 * ha = adapter block pointer.
977 qla2x00_loop_reset(scsi_qla_host_t *ha)
979 int status = QLA_SUCCESS;
980 struct fc_port *fcport;
982 if (ha->flags.enable_lip_reset) {
983 status = qla2x00_lip_reset(ha);
986 if (status == QLA_SUCCESS && ha->flags.enable_target_reset) {
987 list_for_each_entry(fcport, &ha->fcports, list) {
988 if (fcport->port_type != FCT_TARGET)
991 status = qla2x00_device_reset(ha, fcport);
992 if (status != QLA_SUCCESS)
997 if (status == QLA_SUCCESS &&
998 ((!ha->flags.enable_target_reset &&
999 !ha->flags.enable_lip_reset) ||
1000 ha->flags.enable_lip_full_login)) {
1002 status = qla2x00_full_login_lip(ha);
1005 /* Issue marker command only when we are going to start the I/O */
1006 ha->marker_needed = 1;
1010 DEBUG2_3(printk("%s(%ld): **** FAILED ****\n",
1015 DEBUG3(printk("%s(%ld): exiting normally.\n",
1024 * qla2x00_device_reset
1025 * Issue bus device reset message to the target.
1028 * ha = adapter block pointer.
1030 * TARGET_QUEUE_LOCK must be released.
1031 * ADAPTER_STATE_LOCK must be released.
1037 qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
1039 /* Abort Target command will clear Reservation */
1040 return ha->isp_ops.abort_target(reset_fcport);
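/*
 * SCSI mid-layer slave hooks: slave_alloc ties the scsi_device to its
 * fc_port through the rport's dd_data, slave_configure selects tagged vs.
 * untagged queueing and sets the rport dev_loss timeout, slave_destroy
 * drops the hostdata back-pointer, and the change_queue_depth/type hooks
 * adjust per-LUN queueing at runtime.
 */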
1044 qla2xxx_slave_alloc(struct scsi_device *sdev)
1046 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1048 if (!rport || fc_remote_port_chkready(rport))
1051 sdev->hostdata = *(fc_port_t **)rport->dd_data;
1057 qla2xxx_slave_configure(struct scsi_device *sdev)
1059 scsi_qla_host_t *ha = to_qla_host(sdev->host);
1060 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1062 if (sdev->tagged_supported)
1063 scsi_activate_tcq(sdev, 32);
1065 scsi_deactivate_tcq(sdev, 32);
1067 rport->dev_loss_tmo = ha->port_down_retry_count + 5;
1073 qla2xxx_slave_destroy(struct scsi_device *sdev)
1075 sdev->hostdata = NULL;
1079 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
1081 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1082 return sdev->queue_depth;
1086 qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1088 if (sdev->tagged_supported) {
1089 scsi_set_tag_type(sdev, tag_type);
1091 scsi_activate_tcq(sdev, sdev->queue_depth);
1093 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1101 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1104 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1105 * supported addressing method.
1108 qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1110 /* Assume a 32bit DMA mask. */
1111 ha->flags.enable_64bit_addressing = 0;
1113 if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
1114 /* Any upper-dword bits set? */
1115 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1116 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
1117 /* Ok, a 64bit DMA mask is applicable. */
1118 ha->flags.enable_64bit_addressing = 1;
1119 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
1120 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
1125 dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK);
1126 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
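/*
 * qla2x00_iospace_config() - Validate and claim the PCI BARs: an optional
 * PIO region (BAR 0, only needed for flash access on some ISP2312 parts)
 * and the mandatory MMIO register window (BAR 1), which is ioremap()ed
 * into ha->iobase.
 */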
1130 qla2x00_iospace_config(scsi_qla_host_t *ha)
1132 unsigned long pio, pio_len, pio_flags;
1133 unsigned long mmio, mmio_len, mmio_flags;
1135 /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1136 pio = pci_resource_start(ha->pdev, 0);
1137 pio_len = pci_resource_len(ha->pdev, 0);
1138 pio_flags = pci_resource_flags(ha->pdev, 0);
1139 if (pio_flags & IORESOURCE_IO) {
1140 if (pio_len < MIN_IOBASE_LEN) {
1141 qla_printk(KERN_WARNING, ha,
1142 "Invalid PCI I/O region size (%s)...\n",
1143 pci_name(ha->pdev));
1147 qla_printk(KERN_WARNING, ha,
1148 "region #0 not a PIO resource (%s)...\n",
1149 pci_name(ha->pdev));
1153 /* Use MMIO operations for all accesses. */
1154 mmio = pci_resource_start(ha->pdev, 1);
1155 mmio_len = pci_resource_len(ha->pdev, 1);
1156 mmio_flags = pci_resource_flags(ha->pdev, 1);
1158 if (!(mmio_flags & IORESOURCE_MEM)) {
1159 qla_printk(KERN_ERR, ha,
1160 "region #0 not an MMIO resource (%s), aborting\n",
1161 pci_name(ha->pdev));
1162 goto iospace_error_exit;
1164 if (mmio_len < MIN_IOBASE_LEN) {
1165 qla_printk(KERN_ERR, ha,
1166 "Invalid PCI mem region size (%s), aborting\n",
1167 pci_name(ha->pdev));
1168 goto iospace_error_exit;
1171 if (pci_request_regions(ha->pdev, ha->brd_info->drv_name)) {
1172 qla_printk(KERN_WARNING, ha,
1173 "Failed to reserve PIO/MMIO regions (%s)\n",
1174 pci_name(ha->pdev));
1176 goto iospace_error_exit;
1179 ha->pio_address = pio;
1180 ha->pio_length = pio_len;
1181 ha->iobase = ioremap(mmio, MIN_IOBASE_LEN);
1183 qla_printk(KERN_ERR, ha,
1184 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1186 goto iospace_error_exit;
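/*
 * Interrupt control helpers: the 2x00 variants toggle the RISC/host
 * interrupt enables in the ICR under the hardware lock, while the 24xx
 * variants use the extended ICRX register; the read-back of ictrl flushes
 * the posted write.
 */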
1196 qla2x00_enable_intrs(scsi_qla_host_t *ha)
1198 unsigned long flags = 0;
1199 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1201 spin_lock_irqsave(&ha->hardware_lock, flags);
1202 ha->interrupts_on = 1;
1203 /* enable risc and host interrupts */
1204 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1205 RD_REG_WORD(&reg->ictrl);
1206 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 qla2x00_disable_intrs(scsi_qla_host_t *ha)
1213 unsigned long flags = 0;
1214 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1216 spin_lock_irqsave(&ha->hardware_lock, flags);
1217 ha->interrupts_on = 0;
1218 /* disable risc and host interrupts */
1219 WRT_REG_WORD(&reg->ictrl, 0);
1220 RD_REG_WORD(&reg->ictrl);
1221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1225 qla24xx_enable_intrs(scsi_qla_host_t *ha)
1227 unsigned long flags = 0;
1228 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1230 spin_lock_irqsave(&ha->hardware_lock, flags);
1231 ha->interrupts_on = 1;
1232 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1233 RD_REG_DWORD(&reg->ictrl);
1234 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1238 qla24xx_disable_intrs(scsi_qla_host_t *ha)
1240 unsigned long flags = 0;
1241 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1243 spin_lock_irqsave(&ha->hardware_lock, flags);
1244 ha->interrupts_on = 0;
1245 WRT_REG_DWORD(&reg->ictrl, 0);
1246 RD_REG_DWORD(&reg->ictrl);
1247 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1251 * PCI driver interface
1253 int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1256 device_reg_t __iomem *reg;
1257 struct Scsi_Host *host;
1258 scsi_qla_host_t *ha;
1259 unsigned long flags = 0;
1260 unsigned long wait_switch = 0;
1265 if (pci_enable_device(pdev))
1268 host = scsi_host_alloc(brd_info->sht ? brd_info->sht:
1269 &qla2x00_driver_template, sizeof(scsi_qla_host_t));
1272 "qla2xxx: Couldn't allocate host from scsi layer!\n");
1273 goto probe_disable_device;
1276 /* Clear our data area */
1277 ha = (scsi_qla_host_t *)host->hostdata;
1278 memset(ha, 0, sizeof(scsi_qla_host_t));
1282 ha->host_no = host->host_no;
1283 ha->brd_info = brd_info;
1284 sprintf(ha->host_str, "%s_%ld", ha->brd_info->drv_name, ha->host_no);
1288 /* Configure PCI I/O space */
1289 ret = qla2x00_iospace_config(ha);
1293 qla_printk(KERN_INFO, ha,
1294 "Found an %s, irq %d, iobase 0x%p\n", ha->brd_info->isp_name,
1295 pdev->irq, ha->iobase);
1297 spin_lock_init(&ha->hardware_lock);
1299 ha->prev_topology = 0;
1300 ha->ports = MAX_BUSES;
1301 ha->init_cb_size = sizeof(init_cb_t);
1302 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
1304 /* Assign ISP specific operations. */
1305 ha->isp_ops.pci_config = qla2100_pci_config;
1306 ha->isp_ops.reset_chip = qla2x00_reset_chip;
1307 ha->isp_ops.chip_diag = qla2x00_chip_diag;
1308 ha->isp_ops.config_rings = qla2x00_config_rings;
1309 ha->isp_ops.reset_adapter = qla2x00_reset_adapter;
1310 ha->isp_ops.nvram_config = qla2x00_nvram_config;
1311 ha->isp_ops.update_fw_options = qla2x00_update_fw_options;
1312 ha->isp_ops.load_risc = qla2x00_load_risc;
1313 ha->isp_ops.pci_info_str = qla2x00_pci_info_str;
1314 ha->isp_ops.fw_version_str = qla2x00_fw_version_str;
1315 ha->isp_ops.intr_handler = qla2100_intr_handler;
1316 ha->isp_ops.enable_intrs = qla2x00_enable_intrs;
1317 ha->isp_ops.disable_intrs = qla2x00_disable_intrs;
1318 ha->isp_ops.abort_command = qla2x00_abort_command;
1319 ha->isp_ops.abort_target = qla2x00_abort_target;
1320 ha->isp_ops.fabric_login = qla2x00_login_fabric;
1321 ha->isp_ops.fabric_logout = qla2x00_fabric_logout;
1322 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
1323 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
1324 ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
1325 ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
1326 ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
1327 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1328 ha->isp_ops.fw_dump = qla2100_fw_dump;
1329 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
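/*
 * The defaults above cover the ISP2100; the chip-specific branches below
 * override queue lengths, mailbox counts and isp_ops entries for the
 * 2200, 23xx and 24xx/25xx families.
 */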
1330 if (IS_QLA2100(ha)) {
1331 host->max_id = MAX_TARGETS_2100;
1332 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1333 ha->request_q_length = REQUEST_ENTRY_CNT_2100;
1334 ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
1335 ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
1336 host->sg_tablesize = 32;
1337 ha->gid_list_info_size = 4;
1338 } else if (IS_QLA2200(ha)) {
1339 host->max_id = MAX_TARGETS_2200;
1340 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1341 ha->request_q_length = REQUEST_ENTRY_CNT_2200;
1342 ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
1343 ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
1344 ha->gid_list_info_size = 4;
1345 } else if (IS_QLA23XX(ha)) {
1346 host->max_id = MAX_TARGETS_2200;
1347 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1348 ha->request_q_length = REQUEST_ENTRY_CNT_2200;
1349 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1350 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1351 ha->isp_ops.pci_config = qla2300_pci_config;
1352 ha->isp_ops.intr_handler = qla2300_intr_handler;
1353 ha->isp_ops.fw_dump = qla2300_fw_dump;
1354 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
1355 ha->gid_list_info_size = 6;
1356 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1357 host->max_id = MAX_TARGETS_2200;
1358 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1359 ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
1360 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1361 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1362 ha->init_cb_size = sizeof(struct init_cb_24xx);
1363 ha->mgmt_svr_loop_id = 10;
1364 ha->isp_ops.pci_config = qla24xx_pci_config;
1365 ha->isp_ops.reset_chip = qla24xx_reset_chip;
1366 ha->isp_ops.chip_diag = qla24xx_chip_diag;
1367 ha->isp_ops.config_rings = qla24xx_config_rings;
1368 ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
1369 ha->isp_ops.nvram_config = qla24xx_nvram_config;
1370 ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
1371 ha->isp_ops.load_risc = qla24xx_load_risc_flash;
1373 ha->isp_ops.load_risc = qla24xx_load_risc_hotplug;
1374 ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
1375 ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
1376 ha->isp_ops.intr_handler = qla24xx_intr_handler;
1377 ha->isp_ops.enable_intrs = qla24xx_enable_intrs;
1378 ha->isp_ops.disable_intrs = qla24xx_disable_intrs;
1379 ha->isp_ops.abort_command = qla24xx_abort_command;
1380 ha->isp_ops.abort_target = qla24xx_abort_target;
1381 ha->isp_ops.fabric_login = qla24xx_login_fabric;
1382 ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
1383 ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
1384 ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
1385 ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
1386 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1387 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1388 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
1389 ha->gid_list_info_size = 8;
1391 host->can_queue = ha->request_q_length + 128;
1393 /* load the F/W, read parameters, and init the H/W */
1394 ha->instance = num_hosts;
1396 init_MUTEX(&ha->mbx_cmd_sem);
1397 init_MUTEX_LOCKED(&ha->mbx_intr_sem);
1399 INIT_LIST_HEAD(&ha->list);
1400 INIT_LIST_HEAD(&ha->fcports);
1401 INIT_LIST_HEAD(&ha->rscn_fcports);
1404 * These locks are used to prevent more than one CPU
1405 * from modifying the queue at the same time. The
1406 * higher level "host_lock" will reduce most
1407 * contention for these locks.
1409 spin_lock_init(&ha->mbx_reg_lock);
1411 init_completion(&ha->dpc_inited);
1412 init_completion(&ha->dpc_exited);
1414 qla2x00_config_dma_addressing(ha);
1415 if (qla2x00_mem_alloc(ha)) {
1416 qla_printk(KERN_WARNING, ha,
1417 "[ERROR] Failed to allocate memory for adapter\n");
1423 if (qla2x00_initialize_adapter(ha) &&
1424 !(ha->device_flags & DFLG_NO_CABLE)) {
1426 qla_printk(KERN_WARNING, ha,
1427 "Failed to initialize adapter\n");
1429 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1430 "Adapter flags %x.\n",
1431 ha->host_no, ha->device_flags));
1438 * Startup the kernel thread for this host adapter
1440 ha->dpc_should_die = 0;
1441 ha->dpc_pid = kernel_thread(qla2x00_do_dpc, ha, 0);
1442 if (ha->dpc_pid < 0) {
1443 qla_printk(KERN_WARNING, ha,
1444 "Unable to start DPC thread!\n");
1449 wait_for_completion(&ha->dpc_inited);
1451 host->this_id = 255;
1452 host->cmd_per_lun = 3;
1453 host->unique_id = ha->instance;
1454 host->max_cmd_len = MAX_CMDSZ;
1455 host->max_channel = ha->ports - 1;
1456 host->max_lun = MAX_LUNS;
1457 host->transportt = qla2xxx_transport_template;
1459 ret = request_irq(pdev->irq, ha->isp_ops.intr_handler,
1460 SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
1462 qla_printk(KERN_WARNING, ha,
1463 "Failed to reserve interrupt %d already in use.\n",
1467 host->irq = pdev->irq;
1469 /* Initialize the timer. */
1470 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL);
1472 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1475 ha->isp_ops.disable_intrs(ha);
1477 spin_lock_irqsave(&ha->hardware_lock, flags);
1479 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1480 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1481 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1483 WRT_REG_WORD(&reg->isp.semaphore, 0);
1484 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1485 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1487 /* Enable proper parity */
1488 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1491 WRT_REG_WORD(&reg->isp.hccr,
1492 (HCCR_ENABLE_PARITY + 0x1));
1494 /* SRAM, Instruction RAM and GP RAM parity */
1495 WRT_REG_WORD(&reg->isp.hccr,
1496 (HCCR_ENABLE_PARITY + 0x7));
1499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1501 ha->isp_ops.enable_intrs(ha);
1505 * Wait around max loop_reset_delay secs for the devices to come
1506 * on-line. We don't want Linux scanning before we are ready.
1509 for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
1510 time_before(jiffies,wait_switch) &&
1511 !(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
1512 && (ha->device_flags & SWITCH_FOUND) ;) {
1514 qla2x00_check_fabric_devices(ha);
1519 pci_set_drvdata(pdev, ha);
1520 ha->flags.init_done = 1;
1523 ret = scsi_add_host(host, &pdev->dev);
1527 qla2x00_alloc_sysfs_attr(ha);
1529 qla2x00_init_host_attr(ha);
1531 qla_printk(KERN_INFO, ha, "\n"
1532 " QLogic Fibre Channel HBA Driver: %s\n"
1534 " %s: %s @ %s hdma%c, host#=%ld, fw=%s\n", qla2x00_version_str,
1535 ha->model_number, ha->model_desc ? ha->model_desc: "",
1536 ha->brd_info->isp_name, ha->isp_ops.pci_info_str(ha, pci_info),
1537 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+': '-',
1538 ha->host_no, ha->isp_ops.fw_version_str(ha, fw_str));
1540 /* Go with fc_rport registration. */
1541 list_for_each_entry(fcport, &ha->fcports, list)
1542 qla2x00_reg_remote_port(ha, fcport);
1547 qla2x00_free_device(ha);
1549 scsi_host_put(host);
1551 probe_disable_device:
1552 pci_disable_device(pdev);
1557 EXPORT_SYMBOL_GPL(qla2x00_probe_one);
1559 void qla2x00_remove_one(struct pci_dev *pdev)
1561 scsi_qla_host_t *ha;
1563 ha = pci_get_drvdata(pdev);
1565 qla2x00_free_sysfs_attr(ha);
1567 fc_remove_host(ha->host);
1569 scsi_remove_host(ha->host);
1571 qla2x00_free_device(ha);
1573 scsi_host_put(ha->host);
1575 pci_set_drvdata(pdev, NULL);
1577 EXPORT_SYMBOL_GPL(qla2x00_remove_one);
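/*
 * qla2x00_free_device() - Common teardown used by the remove and
 * probe-failure paths: cancel outstanding IO descriptors, stop the
 * timer, signal and reap the DPC thread, quiesce firmware and
 * interrupts, then release memory, the IRQ and the PCI resources.
 */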
1580 qla2x00_free_device(scsi_qla_host_t *ha)
1584 /* Abort any outstanding IO descriptors. */
1585 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
1586 qla2x00_cancel_io_descriptors(ha);
1589 if (ha->timer_active)
1590 qla2x00_stop_timer(ha);
1592 /* Kill the kernel thread for this host */
1593 if (ha->dpc_pid >= 0) {
1594 ha->dpc_should_die = 1;
1596 ret = kill_proc(ha->dpc_pid, SIGHUP, 1);
1598 qla_printk(KERN_ERR, ha,
1599 "Unable to signal DPC thread -- (%d)\n", ret);
1601 /* TODO: SOMETHING MORE??? */
1603 wait_for_completion(&ha->dpc_exited);
1607 /* Stop currently executing firmware. */
1608 qla2x00_stop_firmware(ha);
1610 /* turn-off interrupts on the card */
1611 if (ha->interrupts_on)
1612 ha->isp_ops.disable_intrs(ha);
1614 qla2x00_mem_free(ha);
1616 ha->flags.online = 0;
1618 /* Detach interrupts */
1620 free_irq(ha->pdev->irq, ha);
1622 /* release io space registers */
1624 iounmap(ha->iobase);
1625 pci_release_regions(ha->pdev);
1627 pci_disable_device(ha->pdev);
1631 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
1633 * Input: ha = adapter block pointer. fcport = port structure pointer.
1639 void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1642 if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
1643 schedule_work(&fcport->rport_del_work);
1646 * We may need to retry the login, so don't change the state of the
1647 * port but do the retries.
1649 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
1650 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1655 if (fcport->login_retry == 0) {
1656 fcport->login_retry = ha->login_retry_count;
1657 set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
1659 DEBUG(printk("scsi(%ld): Port login retry: "
1660 "%02x%02x%02x%02x%02x%02x%02x%02x, "
1661 "id = 0x%04x retry cnt=%d\n",
1663 fcport->port_name[0],
1664 fcport->port_name[1],
1665 fcport->port_name[2],
1666 fcport->port_name[3],
1667 fcport->port_name[4],
1668 fcport->port_name[5],
1669 fcport->port_name[6],
1670 fcport->port_name[7],
1672 fcport->login_retry));
1677 * qla2x00_mark_all_devices_lost
1678 * Updates fcport state when device goes offline.
1681 * ha = adapter block pointer.
1682 * fcport = port structure pointer.
1690 qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha)
1694 list_for_each_entry(fcport, &ha->fcports, list) {
1695 if (fcport->port_type != FCT_TARGET)
1699 * No point in marking the device as lost if the device is already DEAD.
1702 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1704 if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
1705 schedule_work(&fcport->rport_del_work);
1706 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1712 * Allocates adapter memory.
1719 qla2x00_mem_alloc(scsi_qla_host_t *ha)
1727 * This will loop only once if everything goes well, else some
1728 * number of retries will be performed to get around a kernel
1729 * bug where available mem is not allocated until after a
1730 * little delay and a retry.
1732 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
1733 (ha->request_q_length + 1) * sizeof(request_t),
1734 &ha->request_dma, GFP_KERNEL);
1735 if (ha->request_ring == NULL) {
1736 qla_printk(KERN_WARNING, ha,
1737 "Memory Allocation failed - request_ring\n");
1739 qla2x00_mem_free(ha);
1745 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
1746 (ha->response_q_length + 1) * sizeof(response_t),
1747 &ha->response_dma, GFP_KERNEL);
1748 if (ha->response_ring == NULL) {
1749 qla_printk(KERN_WARNING, ha,
1750 "Memory Allocation failed - response_ring\n");
1752 qla2x00_mem_free(ha);
1758 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1759 &ha->gid_list_dma, GFP_KERNEL);
1760 if (ha->gid_list == NULL) {
1761 qla_printk(KERN_WARNING, ha,
1762 "Memory Allocation failed - gid_list\n");
1764 qla2x00_mem_free(ha);
1770 ha->rlc_rsp = dma_alloc_coherent(&ha->pdev->dev,
1771 sizeof(rpt_lun_cmd_rsp_t), &ha->rlc_rsp_dma, GFP_KERNEL);
1772 if (ha->rlc_rsp == NULL) {
1773 qla_printk(KERN_WARNING, ha,
1774 "Memory Allocation failed - rlc");
1776 qla2x00_mem_free(ha);
1782 snprintf(name, sizeof(name), "qla2xxx_%ld", ha->host_no);
1783 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
1784 DMA_POOL_SIZE, 8, 0);
1785 if (ha->s_dma_pool == NULL) {
1786 qla_printk(KERN_WARNING, ha,
1787 "Memory Allocation failed - s_dma_pool\n");
1789 qla2x00_mem_free(ha);
1795 /* get consistent memory allocated for init control block */
1796 ha->init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
1798 if (ha->init_cb == NULL) {
1799 qla_printk(KERN_WARNING, ha,
1800 "Memory Allocation failed - init_cb\n");
1802 qla2x00_mem_free(ha);
1807 memset(ha->init_cb, 0, ha->init_cb_size);
1809 /* Get consistent memory allocated for Get Port Database cmd */
1810 ha->iodesc_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
1811 &ha->iodesc_pd_dma);
1812 if (ha->iodesc_pd == NULL) {
1814 qla_printk(KERN_WARNING, ha,
1815 "Memory Allocation failed - iodesc_pd\n");
1817 qla2x00_mem_free(ha);
1822 memset(ha->iodesc_pd, 0, PORT_DATABASE_SIZE);
1824 /* Allocate ioctl related memory. */
1825 if (qla2x00_alloc_ioctl_mem(ha)) {
1826 qla_printk(KERN_WARNING, ha,
1827 "Memory Allocation failed - ioctl_mem\n");
1829 qla2x00_mem_free(ha);
1835 if (qla2x00_allocate_sp_pool(ha)) {
1836 qla_printk(KERN_WARNING, ha,
1837 "Memory Allocation failed - "
1838 "qla2x00_allocate_sp_pool()\n");
1840 qla2x00_mem_free(ha);
1846 /* Allocate memory for SNS commands */
1847 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1848 /* Get consistent memory allocated for SNS commands */
1849 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
1850 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
1852 if (ha->sns_cmd == NULL) {
1854 qla_printk(KERN_WARNING, ha,
1855 "Memory Allocation failed - sns_cmd\n");
1857 qla2x00_mem_free(ha);
1862 memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
1864 /* Get consistent memory allocated for MS IOCB */
1865 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
1867 if (ha->ms_iocb == NULL) {
1869 qla_printk(KERN_WARNING, ha,
1870 "Memory Allocation failed - ms_iocb\n");
1872 qla2x00_mem_free(ha);
1877 memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t));
1880 * Get consistent memory allocated for CT SNS
1883 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
1884 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
1886 if (ha->ct_sns == NULL) {
1888 qla_printk(KERN_WARNING, ha,
1889 "Memory Allocation failed - ct_sns\n");
1891 qla2x00_mem_free(ha);
1896 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
1899 /* Done all allocations without any error. */
1902 } while (retry-- && status != 0);
1906 "%s(): **** FAILED ****\n", __func__);
1914 * Frees all adapter allocated memory.
1917 * ha = adapter block pointer.
1920 qla2x00_mem_free(scsi_qla_host_t *ha)
1922 struct list_head *fcpl, *fcptemp;
1924 unsigned int wtime;/* max wait time if mbx cmd is busy. */
1928 DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__));
1932 /* Make sure all other threads are stopped. */
1934 while (ha->dpc_wait && wtime)
1935 wtime = msleep_interruptible(wtime);
1937 /* free ioctl memory */
1938 qla2x00_free_ioctl_mem(ha);
1941 qla2x00_free_sp_pool(ha);
1944 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
1945 ha->sns_cmd, ha->sns_cmd_dma);
1948 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
1949 ha->ct_sns, ha->ct_sns_dma);
1952 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
1955 dma_pool_free(ha->s_dma_pool, ha->iodesc_pd, ha->iodesc_pd_dma);
1958 dma_pool_free(ha->s_dma_pool, ha->init_cb, ha->init_cb_dma);
1961 dma_pool_destroy(ha->s_dma_pool);
1964 dma_free_coherent(&ha->pdev->dev,
1965 sizeof(rpt_lun_cmd_rsp_t), ha->rlc_rsp,
1969 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
1972 if (ha->response_ring)
1973 dma_free_coherent(&ha->pdev->dev,
1974 (ha->response_q_length + 1) * sizeof(response_t),
1975 ha->response_ring, ha->response_dma);
1977 if (ha->request_ring)
1978 dma_free_coherent(&ha->pdev->dev,
1979 (ha->request_q_length + 1) * sizeof(request_t),
1980 ha->request_ring, ha->request_dma);
1983 ha->sns_cmd_dma = 0;
1987 ha->ms_iocb_dma = 0;
1988 ha->iodesc_pd = NULL;
1989 ha->iodesc_pd_dma = 0;
1991 ha->init_cb_dma = 0;
1993 ha->s_dma_pool = NULL;
1996 ha->rlc_rsp_dma = 0;
1997 ha->gid_list = NULL;
1998 ha->gid_list_dma = 0;
2000 ha->response_ring = NULL;
2001 ha->response_dma = 0;
2002 ha->request_ring = NULL;
2003 ha->request_dma = 0;
2005 list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
2006 fcport = list_entry(fcpl, fc_port_t, list);
2009 list_del_init(&fcport->list);
2012 INIT_LIST_HEAD(&ha->fcports);
2015 free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order);
2017 vfree(ha->fw_dump24);
2019 vfree(ha->fw_dump_buffer);
2022 ha->fw_dump24 = NULL;
2024 ha->fw_dump_reading = 0;
2025 ha->fw_dump_buffer = NULL;
2029 * qla2x00_allocate_sp_pool
2030 * This routine is called during initialization to allocate
2031 * memory for local srb_t.
2034 * ha = adapter block pointer.
2039 * Note: Sets the ref_count for a non-NULL sp to one.
2042 qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
2047 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2048 mempool_free_slab, srb_cachep);
2049 if (ha->srb_mempool == NULL) {
2050 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
2051 rval = QLA_FUNCTION_FAILED;
2057 * This routine frees the SRB mempool allocated for the adapter.
2061 qla2x00_free_sp_pool( scsi_qla_host_t *ha)
2063 if (ha->srb_mempool) {
2064 mempool_destroy(ha->srb_mempool);
2065 ha->srb_mempool = NULL;
2069 /**************************************************************************
2071 * This kernel thread is a task that is scheduled by the interrupt handler
2072 * to perform the background processing for interrupts.
2075 * This task always runs in the context of a kernel thread. It
2076 * is kicked off by the driver's detect code; one is started
2077 * per adapter. It immediately goes to sleep and waits for
2078 * some fibre event. When either the interrupt handler or
2079 * the timer routine detects an event, it will set one of the task
2080 * bits and then wake us up.
2081 **************************************************************************/
2083 qla2x00_do_dpc(void *data)
2085 DECLARE_MUTEX_LOCKED(sem);
2086 scsi_qla_host_t *ha;
2089 uint16_t next_loopid;
2091 ha = (scsi_qla_host_t *)data;
2095 daemonize("%s_dpc", ha->host_str);
2096 allow_signal(SIGHUP);
2098 ha->dpc_wait = &sem;
2100 set_user_nice(current, -20);
2104 complete(&ha->dpc_inited);
2107 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
2109 if (down_interruptible(&sem))
2112 if (ha->dpc_should_die)
2115 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2117 /* Initialization not yet finished. Don't do anything yet. */
2118 if (!ha->flags.init_done || ha->dpc_active)
2121 DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));
2125 if (ha->flags.mbox_busy) {
2130 if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
2132 DEBUG(printk("scsi(%ld): dpc: sched "
2133 "qla2x00_abort_isp ha = %p\n",
2135 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2138 if (qla2x00_abort_isp(ha)) {
2139 /* failed. retry later */
2140 set_bit(ISP_ABORT_NEEDED,
2143 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2145 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2149 if (test_and_clear_bit(LOOP_RESET_NEEDED, &ha->dpc_flags)) {
2150 DEBUG(printk("scsi(%ld): dpc: sched loop_reset()\n",
2152 qla2x00_loop_reset(ha);
2155 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
2156 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
2158 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2161 qla2x00_rst_aen(ha);
2162 clear_bit(RESET_ACTIVE, &ha->dpc_flags);
2165 /* Retry each device up to login retry count */
2166 if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
2167 !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
2168 atomic_read(&ha->loop_state) != LOOP_DOWN) {
2170 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2174 list_for_each_entry(fcport, &ha->fcports, list) {
2175 if (fcport->port_type != FCT_TARGET)
2179 * If the port is not ONLINE then try to login
2180 * to it if we haven't run out of retries.
2182 if (atomic_read(&fcport->state) != FCS_ONLINE &&
2183 fcport->login_retry) {
2185 fcport->login_retry--;
2186 if (fcport->flags & FCF_FABRIC_DEVICE) {
2189 ha->isp_ops.fabric_logout(
2190 ha, fcport->loop_id,
2191 fcport->d_id.b.domain,
2192 fcport->d_id.b.area,
2193 fcport->d_id.b.al_pa);
2194 status = qla2x00_fabric_login(
2195 ha, fcport, &next_loopid);
2198 qla2x00_local_device_login(
2199 ha, fcport->loop_id);
2201 if (status == QLA_SUCCESS) {
2202 fcport->old_loop_id = fcport->loop_id;
2204 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
2205 ha->host_no, fcport->loop_id));
2207 fcport->port_login_retry_count =
2208 ha->port_down_retry_count * PORT_RETRY_TIME;
2209 atomic_set(&fcport->state, FCS_ONLINE);
2210 atomic_set(&fcport->port_down_timer,
2211 ha->port_down_retry_count * PORT_RETRY_TIME);
2213 fcport->login_retry = 0;
2214 } else if (status == 1) {
2215 set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
2216 /* retry the login again */
2217 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
2219 fcport->login_retry, fcport->loop_id));
2221 fcport->login_retry = 0;
2224 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2227 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2231 if ((test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags)) &&
2232 atomic_read(&ha->loop_state) != LOOP_DOWN) {
2234 clear_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags);
2235 DEBUG(printk("scsi(%ld): qla2x00_login_retry()\n",
2238 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
2240 DEBUG(printk("scsi(%ld): qla2x00_login_retry - end\n",
2244 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2246 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2249 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2252 qla2x00_loop_resync(ha);
2254 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
2257 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2261 if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) {
2263 DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n",
2266 qla2x00_rescan_fcports(ha);
2268 DEBUG(printk("scsi(%ld): Rescan flagged fcports..."
2273 if (!ha->interrupts_on)
2274 ha->isp_ops.enable_intrs(ha);
2277 } /* End of while(1) */
2279 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no));
2282 * Make sure that nobody tries to wake us up again.
2284 ha->dpc_wait = NULL;
2287 complete_and_exit(&ha->dpc_exited, 0);
2292 * Processes asynchronous reset.
2295 * ha = adapter block pointer.
2298 qla2x00_rst_aen(scsi_qla_host_t *ha)
2300 if (ha->flags.online && !ha->flags.reset_active &&
2301 !atomic_read(&ha->loop_down_timer) &&
2302 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
2304 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
2307 * Issue marker command only when we are going to start
2310 ha->marker_needed = 1;
2311 } while (!atomic_read(&ha->loop_down_timer) &&
2312 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags)));
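/*
 * qla2x00_sp_free_dma() unmaps whatever DMA mapping was set up for the
 * command (scatter-gather list or single buffer) and clears SRB_DMA_VALID;
 * qla2x00_sp_compl() then returns the SRB to the mempool and completes the
 * command back to the SCSI mid-layer.
 */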
2317 qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2319 struct scsi_cmnd *cmd = sp->cmd;
2321 if (sp->flags & SRB_DMA_VALID) {
2323 dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
2324 cmd->use_sg, cmd->sc_data_direction);
2325 } else if (cmd->request_bufflen) {
2326 dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
2327 cmd->request_bufflen, cmd->sc_data_direction);
2329 sp->flags &= ~SRB_DMA_VALID;
2335 qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2337 struct scsi_cmnd *cmd = sp->cmd;
2339 qla2x00_sp_free_dma(ha, sp);
2341 mempool_free(sp, ha->srb_mempool);
2343 cmd->scsi_done(cmd);
2346 /**************************************************************************
2352 * Context: Interrupt
2353 ***************************************************************************/
2355 qla2x00_timer(scsi_qla_host_t *ha)
2357 unsigned long cpu_flags = 0;
2365 * Ports - Port down timer.
2367 * Whenever a port is in the LOST state we start decrementing its port
2368 * down timer every second until it reaches zero. Once it reaches zero
2369 * the port is marked DEAD.
2372 list_for_each_entry(fcport, &ha->fcports, list) {
2373 if (fcport->port_type != FCT_TARGET)
2376 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2378 if (atomic_read(&fcport->port_down_timer) == 0)
2381 if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
2382 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
2384 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
2387 t, atomic_read(&fcport->port_down_timer)));
2390 } /* End of for fcport */
2393 /* Loop down handler. */
2394 if (atomic_read(&ha->loop_down_timer) > 0 &&
2395 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) {
2397 if (atomic_read(&ha->loop_down_timer) ==
2398 ha->loop_down_abort_time) {
2400 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
2401 "queues before time expire\n",
2404 if (!IS_QLA2100(ha) && ha->link_down_timeout)
2405 atomic_set(&ha->loop_state, LOOP_DEAD);
2407 /* Schedule an ISP abort to return any tape commands. */
2408 spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
2409 for (index = 1; index < MAX_OUTSTANDING_COMMANDS;
2413 sp = ha->outstanding_cmds[index];
2417 if (!(sfcp->flags & FCF_TAPE_PRESENT))
2420 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
2423 spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags);
2425 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
2429 /* if the loop has been down for 4 minutes, reinit adapter */
2430 if (atomic_dec_and_test(&ha->loop_down_timer) != 0) {
2431 DEBUG(printk("scsi(%ld): Loop down exceeded 4 mins - "
2432 "restarting queues.\n",
2435 set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
2438 if (!(ha->device_flags & DFLG_NO_CABLE)) {
2439 DEBUG(printk("scsi(%ld): Loop down - "
2442 qla_printk(KERN_WARNING, ha,
2443 "Loop down - aborting ISP.\n");
2445 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
2448 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
2450 atomic_read(&ha->loop_down_timer)));
2453 /* Schedule the DPC routine if needed */
2454 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
2455 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
2456 test_bit(LOOP_RESET_NEEDED, &ha->dpc_flags) ||
2458 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
2459 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
2460 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
2461 ha->dpc_wait && !ha->dpc_active) {
2466 qla2x00_restart_timer(ha, WATCH_INTERVAL);
2469 /* XXX(hch): crude hack to emulate a down_timeout() */
2471 qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
2473 const unsigned int step = 100; /* msecs */
2474 unsigned int iterations = jiffies_to_msecs(timeout)/100;
2477 if (!down_trylock(sema))
2479 if (msleep_interruptible(step))
2481 } while (--iterations > 0);
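/*
 * PCI glue: qla_board_tbl describes the ISP2422/ISP2432 boards handled
 * here, and qla2xxx_pci_tbl maps their PCI IDs onto those board entries so
 * qla2xxx_probe_one() can pass the right qla_board_info to
 * qla2x00_probe_one().
 */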
2486 static struct qla_board_info qla_board_tbl[] = {
2488 .drv_name = "qla2400",
2489 .isp_name = "ISP2422",
2490 .fw_fname = "ql2400_fw.bin",
2491 .sht = &qla24xx_driver_template,
2494 .drv_name = "qla2400",
2495 .isp_name = "ISP2432",
2496 .fw_fname = "ql2400_fw.bin",
2497 .sht = &qla24xx_driver_template,
2501 static struct pci_device_id qla2xxx_pci_tbl[] = {
2503 .vendor = PCI_VENDOR_ID_QLOGIC,
2504 .device = PCI_DEVICE_ID_QLOGIC_ISP2422,
2505 .subvendor = PCI_ANY_ID,
2506 .subdevice = PCI_ANY_ID,
2507 .driver_data = (unsigned long)&qla_board_tbl[0],
2510 .vendor = PCI_VENDOR_ID_QLOGIC,
2511 .device = PCI_DEVICE_ID_QLOGIC_ISP2432,
2512 .subvendor = PCI_ANY_ID,
2513 .subdevice = PCI_ANY_ID,
2514 .driver_data = (unsigned long)&qla_board_tbl[1],
2518 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
2520 static int __devinit
2521 qla2xxx_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2523 return qla2x00_probe_one(pdev,
2524 (struct qla_board_info *)id->driver_data);
2527 static void __devexit
2528 qla2xxx_remove_one(struct pci_dev *pdev)
2530 qla2x00_remove_one(pdev);
2533 static struct pci_driver qla2xxx_pci_driver = {
2535 .id_table = qla2xxx_pci_tbl,
2536 .probe = qla2xxx_probe_one,
2537 .remove = __devexit_p(qla2xxx_remove_one),
2541 * qla2x00_module_init - Module initialization.
2544 qla2x00_module_init(void)
2548 /* Allocate cache for SRBs. */
2549 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
2550 SLAB_HWCACHE_ALIGN, NULL, NULL);
2551 if (srb_cachep == NULL) {
2553 "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
2557 /* Derive version string. */
2558 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
2560 strcat(qla2x00_version_str, "-debug");
2562 qla2xxx_transport_template =
2563 fc_attach_transport(&qla2xxx_transport_functions);
2564 if (!qla2xxx_transport_template)
2567 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
2568 ret = pci_module_init(&qla2xxx_pci_driver);
2570 kmem_cache_destroy(srb_cachep);
2571 fc_release_transport(qla2xxx_transport_template);
2577 * qla2x00_module_exit - Module cleanup.
2580 qla2x00_module_exit(void)
2582 pci_unregister_driver(&qla2xxx_pci_driver);
2583 kmem_cache_destroy(srb_cachep);
2584 fc_release_transport(qla2xxx_transport_template);
2587 module_init(qla2x00_module_init);
2588 module_exit(qla2x00_module_exit);
2590 MODULE_AUTHOR("QLogic Corporation");
2591 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
2592 MODULE_LICENSE("GPL");
2593 MODULE_VERSION(QLA2XXX_VERSION);