1 /******************************************************************************
2 * QLOGIC LINUX SOFTWARE
4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7 * Copyright (C) 2003-2004 Christoph Hellwig
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 ******************************************************************************/
20 #define QLA1280_VERSION "3.26"
21 /*****************************************************************************
23 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig
26 - use pci_map_single to map non-S/G requests
27 - remove qla1280_proc_info
28 Rev 3.25, September 28, 2004, Christoph Hellwig
29 - add support for ISP1020/1040
30 - don't include "scsi.h" anymore for 2.6.x
31 Rev 3.24.4 June 7, 2004 Christoph Hellwig
32 - restructure firmware loading, cleanup initialization code
33 - prepare support for ISP1020/1040 chips
34 Rev 3.24.3 January 19, 2004, Jes Sorensen
35 - Handle PCI DMA mask settings correctly
36 - Correct order of error handling in probe_one, free_irq should not
37 be called if request_irq failed
38 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
39 - Big endian fixes (James)
40 - Remove bogus IOCB content on zero data transfer commands (Andrew)
41 Rev 3.24.1 January 5, 2004, Jes Sorensen
42 - Initialize completion queue to avoid OOPS on probe
43 - Handle interrupts during mailbox testing
44 Rev 3.24 November 17, 2003, Christoph Hellwig
45 - use struct list_head for completion queue
46 - avoid old Scsi_FOO typedefs
47 - cleanup 2.4 compat glue a bit
48 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
49 - make initialization for memory mapped vs port I/O more similar
50 - remove broken pci config space manipulation
52 - this is an almost perfect 2.6 scsi driver now! ;)
53 Rev 3.23.39 December 17, 2003, Jes Sorensen
54 - Delete completion queue from srb if mailbox command failed,
55 to avoid qla1280_done completing qla1280_error_action's
56 obsolete context
57 - Reduce arguments for qla1280_done
58 Rev 3.23.38 October 18, 2003, Christoph Hellwig
59 - Convert to new-style hotplugable driver for 2.6
60 - Fix missing scsi_unregister/scsi_host_put on HBA removal
61 - Kill some more cruft
62 Rev 3.23.37 October 1, 2003, Jes Sorensen
63 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
65 - Clean up locking in probe path
66 Rev 3.23.36 October 1, 2003, Christoph Hellwig
67 - queuecommand only ever receives new commands - clear flags
68 - Reintegrate lost fixes from Linux 2.5
69 Rev 3.23.35 August 14, 2003, Jes Sorensen
71 Rev 3.23.34 July 23, 2003, Jes Sorensen
72 - Remove pointless TRUE/FALSE macros
73 - Clean up vchan handling
74 Rev 3.23.33 July 3, 2003, Jes Sorensen
75 - Don't define register access macros before the define determining MMIO.
76 This just happened to work out on ia64 but not elsewhere.
77 - Don't try and read from the card while it is in reset as
78 it won't respond and causes an MCA
79 Rev 3.23.32 June 23, 2003, Jes Sorensen
80 - Basic support for boot time arguments
81 Rev 3.23.31 June 8, 2003, Jes Sorensen
82 - Reduce boot time messages
83 Rev 3.23.30 June 6, 2003, Jes Sorensen
84 - Do not enable sync/wide/ppr before it has been determined
85 that the target device actually supports it
86 - Enable DMA arbitration for multi channel controllers
87 Rev 3.23.29 June 3, 2003, Jes Sorensen
89 Rev 3.23.28 June 3, 2003, Jes Sorensen
90 - Eliminate duplicate marker commands on bus resets
91 - Handle outstanding commands appropriately on bus/device resets
92 Rev 3.23.27 May 28, 2003, Jes Sorensen
93 - Remove bogus input queue code, let the Linux SCSI layer do the work
94 - Clean up NVRAM handling, only read it once from the card
95 - Add a number of missing default nvram parameters
96 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
97 - Use completion queue for mailbox commands instead of busy wait
98 Rev 3.23.25 Beta May 27, 2003, James Bottomley
99 - Migrate to use new error handling code
100 Rev 3.23.24 Beta May 21, 2003, James Bottomley
102 - Cleanup data direction code
103 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
104 - Switch to using MMIO instead of PIO
105 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
106 - Fix PCI parity problem with 12160 during reset.
107 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
108 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
109 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
110 - Remove < 2.4.x support
111 - Introduce HOST_LOCK to make the spin lock changes portable.
112 - Remove a bunch of idiotic and unnecessary typedef's
113 - Kill all leftovers of target-mode support which never worked anyway
114 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
115 - Do qla1280_pci_config() before calling request_irq() and
116 request_region()
117 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
118 of large shifts
119 - Hand correct arguments to free_irq() in case of failure
120 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
121 - Run source through Lindent and clean up the output
122 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
123 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
124 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
125 - Rely on mailbox commands generating interrupts - do not
126 run qla1280_isr() from qla1280_mailbox_command()
127 - Remove device_reg_t
128 - Integrate ql12160_set_target_parameters() with 1280 version
129 - Make qla1280_setup() non static
130 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
131 sent to the card - this command pauses the firmware!!!
132 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
133 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
134 - Remove a pile of pointless and confusing (srb_t **) and
135 (scsi_lu_t *) typecasts
136 - Explicit mark that we do not use the new error handling (for now)
137 - Remove scsi_qla_host_t and use 'struct' instead
138 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
139 pci_64bit_slot flags which weren't used for anything anyway
140 - Grab host->host_lock while calling qla1280_isr() from abort()
141 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
142 do not need to save/restore flags in the interrupt handler
143 - Enable interrupts early (before any mailbox access) in preparation
144 for cleaning up the mailbox handling
145 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
146 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
147 it with proper use of dprintk().
148 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
149 a debug level argument to determine if data is to be printed
150 - Add KERN_* info to printk()
151 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
152 - Significant cosmetic cleanups
153 - Change debug code to use dprintk() and remove #if mess
154 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
155 - More cosmetic cleanups, fix places treating return as function
156 - use cpu_relax() in qla1280_debounce_register()
157 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
158 - Make it compile under 2.5.5
159 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
160 - Do not typecast short * to long * in QL1280BoardTbl, this
161 broke miserably on big endian boxes
162 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
163 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
164 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
165 unsigned int to match the types from struct scsi_cmnd
166 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
167 - Remove bogus timer_t typedef from qla1280.h
168 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
169 for PCI_ values, call pci_set_master()
170 - Fix memleak of qla1280_buffer on module unload
171 - Only compile module parsing code #ifdef MODULE - should be
172 changed to use individual MODULE_PARM's later
173 - Remove dummy_buffer that was never modified nor printed
174 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
175 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
176 - Remove \r from print statements, this is Linux, not DOS
177 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
178 macros
179 - Remove C++ compile hack in header file as Linux drivers are not
180 supposed to be compiled as C++
181 - Kill MS_64BITS macro as removing it makes the code more readable
182 - Remove unnecessary flags.in_interrupts bit
183 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
184 - Don't check for set flags on q->q_flag one by one in qla1280_next()
185 - Check whether the interrupt was generated by the QLA1280 before
187 - qla1280_status_entry(): Only zero out part of sense_buffer that
188 is not being copied into
189 - Remove more superfluous typecasts
190 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
191 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
192 - Don't walk the entire list in qla1280_putq_t() just to directly
193 grab the pointer to the last element afterwards
194 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
195 - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
196 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
197 - Set dev->max_sectors to 1024
198 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
199 - Provide compat macros for pci_enable_device(), pci_find_subsys()
200 and scsi_set_pci_device()
201 - Call scsi_set_pci_device() for all devices
202 - Reduce size of kernel version dependent device probe code
203 - Move duplicate probe/init code to separate function
204 - Handle error if qla1280_mem_alloc() fails
205 - Kill OFFSET() macro and use Linux's PCI definitions instead
206 - Kill private structure defining PCI config space (struct config_reg)
207 - Only allocate I/O port region if not in MMIO mode
208 - Remove duplicate (unused) sanity check of size of srb_t
209 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
210 - Change home-brew memset() implementations to use memset()
211 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
212 port directly is not legal under Linux.
213 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
214 - Remove pre 2.2 kernel support
215 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
216 - Fix MMIO access to use readl/writel instead of directly
217 dereferencing pointers
218 - Nuke MSDOS debugging code
219 - Change true/false data types to int from uint8_t
220 - Use int for counters instead of uint8_t etc.
221 - Clean up size & byte order conversion macro usage
222 Rev 3.23 Beta January 11, 2001 BN Qlogic
223 - Added check of device_id when handling non
224 QLA12160s during detect().
225 Rev 3.22 Beta January 5, 2001 BN Qlogic
226 - Changed queue_task() to schedule_task()
227 for kernels 2.4.0 and higher.
228 Note: 2.4.0-testxx kernels released prior to
229 the actual 2.4.0 kernel release in January 2001
230 will get compile/link errors with schedule_task().
231 Please update your kernel to released 2.4.0 level,
232 or comment lines in this file flagged with 3.22
233 to resolve compile/link error of schedule_task().
234 - Added -DCONFIG_SMP in addition to -D__SMP__
235 in Makefile for 2.4.0 builds of driver as module.
236 Rev 3.21 Beta January 4, 2001 BN Qlogic
237 - Changed criteria of 64/32 Bit mode of HBA
238 operation according to BITS_PER_LONG rather
239 than HBA's NVRAM setting of >4Gig memory bit;
240 so that the HBA auto-configures without the need
241 to setup each system individually.
242 Rev 3.20 Beta December 5, 2000 BN Qlogic
243 - Added priority handling to IA-64 onboard SCSI
244 ISP12160 chip for kernels greater than 2.3.18.
245 - Added irqrestore for qla1280_intr_handler.
246 - Enabled /proc/scsi/qla1280 interface.
247 - Clear /proc/scsi/qla1280 counters in detect().
248 Rev 3.19 Beta October 13, 2000 BN Qlogic
249 - Declare driver_template for new kernel
250 (2.4.0 and greater) scsi initialization scheme.
251 - Update /proc/scsi entry for 2.3.18 kernels and
253 Rev 3.18 Beta October 10, 2000 BN Qlogic
254 - Changed scan order of adapters to map
255 the QLA12160 followed by the QLA1280.
256 Rev 3.17 Beta September 18, 2000 BN Qlogic
257 - Removed warnings for 32 bit 2.4.x compiles
258 - Corrected declared size for request and response
259 DMA addresses that are kept in each ha
260 Rev. 3.16 Beta August 25, 2000 BN Qlogic
261 - Corrected 64 bit addressing issue on IA-64
262 where the upper 32 bits were not properly
263 passed to the RISC engine.
264 Rev. 3.15 Beta August 22, 2000 BN Qlogic
265 - Modified qla1280_setup_chip to properly load
266 ISP firmware for greater than 4 Gig memory on IA-64
267 Rev. 3.14 Beta August 16, 2000 BN Qlogic
268 - Added setting of dma_mask to full 64 bit
269 if flags.enable_64bit_addressing is set in NVRAM
270 Rev. 3.13 Beta August 16, 2000 BN Qlogic
271 - Use new PCI DMA mapping APIs for 2.4.x kernel
272 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
273 - Added check of pci_enable_device to detect() for 2.3.x
274 - Use pci_resource_start() instead of
275 pdev->resource[0].start in detect() for 2.3.x
276 - Updated driver version
277 Rev. 3.11 July 14, 2000 BN Qlogic
278 - Updated SCSI Firmware to following versions:
281 - Updated driver version to 3.11
282 Rev. 3.10 June 23, 2000 BN Qlogic
283 - Added filtering of AMI SubSys Vendor ID devices
285 - DEBUG_QLA1280 undefined and new version BN Qlogic
286 Rev. 3.08b May 9, 2000 MD Dell
287 - Added logic to check against AMI subsystem vendor ID
288 Rev. 3.08 May 4, 2000 DG Qlogic
289 - Added logic to check for PCI subsystem ID.
290 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
291 - Updated SCSI Firmware to following versions:
294 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
295 - Internal revision; not released
296 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
297 - Edit correction for virt_to_bus and PROC.
298 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
299 - Merge changes from ia64 port.
300 Rev. 3.03 Mar 28, 2000 BN Qlogic
301 - Increase version to reflect new code drop with compile fix
302 of issue with inclusion of linux/spinlock for 2.3 kernels
303 Rev. 3.02 Mar 15, 2000 BN Qlogic
304 - Merge qla1280_proc_info from 2.10 code base
305 Rev. 3.01 Feb 10, 2000 BN Qlogic
306 - Corrected code to compile on a 2.2.x kernel.
307 Rev. 3.00 Jan 17, 2000 DG Qlogic
308 - Added 64-bit support.
309 Rev. 2.07 Nov 9, 1999 DG Qlogic
310 - Added new routine to set target parameters for ISP12160.
311 Rev. 2.06 Sept 10, 1999 DG Qlogic
312 - Added support for ISP12160 Ultra 3 chip.
313 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
314 - Modified code to remove errors generated when compiling with
315 Cygnus IA64 Compiler.
316 - Changed conversion of pointers to unsigned longs instead of integers.
317 - Changed type of I/O port variables from uint32_t to unsigned long.
318 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
319 - Changed sprintf and printk format specifiers for pointers to %p.
320 - Changed some int to long type casts where needed in sprintf & printk.
321 - Added l modifiers to sprintf and printk format specifiers for longs.
322 - Removed unused local variables.
323 Rev. 1.20 June 8, 1999 DG, Qlogic
324 Changes to support RedHat release 6.0 (kernel 2.2.5).
325 - Added SCSI exclusive access lock (io_request_lock) when accessing
327 - Added changes for the new LINUX interface template. Some new error
328 handling routines have been added to the template, but for now we
329 will use the old ones.
330 - Initial Beta Release.
331 *****************************************************************************/
334 #include <linux/module.h>
336 #include <linux/types.h>
337 #include <linux/string.h>
338 #include <linux/errno.h>
339 #include <linux/kernel.h>
340 #include <linux/ioport.h>
341 #include <linux/delay.h>
342 #include <linux/timer.h>
343 #include <linux/pci.h>
344 #include <linux/proc_fs.h>
345 #include <linux/stat.h>
346 #include <linux/slab.h>
347 #include <linux/pci_ids.h>
348 #include <linux/interrupt.h>
349 #include <linux/init.h>
350 #include <linux/dma-mapping.h>
354 #include <asm/byteorder.h>
355 #include <asm/processor.h>
356 #include <asm/types.h>
357 #include <asm/system.h>
359 #include <scsi/scsi.h>
360 #include <scsi/scsi_cmnd.h>
361 #include <scsi/scsi_device.h>
362 #include <scsi/scsi_host.h>
363 #include <scsi/scsi_tcq.h>
365 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
366 #include <asm/sn/io.h>
371 * Compile time Options:
372 * 0 - Disable and 1 - Enable
374 #define DEBUG_QLA1280_INTR 0
375 #define DEBUG_PRINT_NVRAM 0
376 #define DEBUG_QLA1280 0
379 * The SGI VISWS is broken and doesn't support MMIO ;-(
381 #ifdef CONFIG_X86_VISWS
382 #define MEMORY_MAPPED_IO 0
384 #define MEMORY_MAPPED_IO 1
387 #define UNIQUE_FW_NAME
389 #include "ql12160_fw.h" /* ISP RISC codes */
390 #include "ql1280_fw.h"
391 #include "ql1040_fw.h"
393 #ifndef BITS_PER_LONG
394 #error "BITS_PER_LONG not defined!"
396 #if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
397 #define QLA_64BIT_PTR 1
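/* With QLA_64BIT_PTR defined, commands are queued through
 * qla1280_64bit_start_scsi() and the upper half of DMA addresses is
 * handed to the ISP via pci_dma_hi32() below; otherwise the 32-bit
 * command path is used. */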
401 #define pci_dma_hi32(a) ((a >> 16) >> 16)
403 #define pci_dma_hi32(a) 0
405 #define pci_dma_lo32(a) (a & 0xffffffff)
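/* For example, a dma_addr_t of 0x0000000180002000 splits into
 * pci_dma_hi32() == 0x00000001 and pci_dma_lo32() == 0x80002000.
 * The double 16-bit shift means a dma_addr_t that is only 32 bits
 * wide is never shifted by a full 32 bits in one go. */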
407 #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
409 #if defined(__ia64__) && !defined(ia64_platform_is)
410 #define ia64_platform_is(foo) (!strcmp(foo, platform_name))
414 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
415 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
416 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
417 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
418 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
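/* For example, an ISP1240 satisfies IS_ISP1x40() but not IS_ISP1040(),
 * and IS_ISP1x160() covers both the ISP10160 and the ISP12160. */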
421 static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
422 static void qla1280_remove_one(struct pci_dev *);
425 * QLogic Driver Support Function Prototypes.
427 static void qla1280_done(struct scsi_qla_host *);
428 static int qla1280_get_token(char *);
429 static int qla1280_setup(char *s) __init;
432 * QLogic ISP1280 Hardware Support Function Prototypes.
434 static int qla1280_load_firmware(struct scsi_qla_host *);
435 static int qla1280_init_rings(struct scsi_qla_host *);
436 static int qla1280_nvram_config(struct scsi_qla_host *);
437 static int qla1280_mailbox_command(struct scsi_qla_host *,
438 uint8_t, uint16_t *);
439 static int qla1280_bus_reset(struct scsi_qla_host *, int);
440 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
441 static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
442 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
443 static int qla1280_abort_isp(struct scsi_qla_host *);
445 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
447 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
449 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
450 static void qla1280_poll(struct scsi_qla_host *);
451 static void qla1280_reset_adapter(struct scsi_qla_host *);
452 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
453 static void qla1280_isp_cmd(struct scsi_qla_host *);
454 static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
455 static void qla1280_rst_aen(struct scsi_qla_host *);
456 static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
458 static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
460 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
461 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
462 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
463 static request_t *qla1280_req_pkt(struct scsi_qla_host *);
464 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
466 static void qla1280_get_target_parameters(struct scsi_qla_host *,
467 struct scsi_device *);
468 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
471 static struct qla_driver_setup driver_setup;
474 * convert scsi data direction to request_t control flags
476 static inline uint16_t
477 qla1280_data_direction(struct scsi_cmnd *cmnd)
479 switch(cmnd->sc_data_direction) {
480 case DMA_FROM_DEVICE:
481 return BIT_5;
482 case DMA_TO_DEVICE:
483 return BIT_6;
484 case DMA_BIDIRECTIONAL:
485 return BIT_5 | BIT_6;
487 * We could BUG() on default here if one of the four cases isn't
488 * met, but then again if we receive something like that from the
489 * SCSI layer we have more serious problems. This shuts up GCC.
498 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
499 static void __qla1280_dump_buffer(char *, int);
504 * insmod needs to find the variable and make it point to something
507 static char *qla1280;
509 /* insmod qla1280 qla1280=verbose */
510 module_param(qla1280, charp, 0);
512 __setup("qla1280=", qla1280_setup);
517 * We use the scsi_pointer structure that's included with each scsi_command
518 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
519 * bigger than a scsi_pointer.
522 #define CMD_SP(Cmnd) &Cmnd->SCp
523 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
524 #define CMD_CDBP(Cmnd) Cmnd->cmnd
525 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
526 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
527 #define CMD_RESULT(Cmnd) Cmnd->result
528 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
529 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
531 #define CMD_HOST(Cmnd) Cmnd->device->host
532 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
533 #define SCSI_TCN_32(Cmnd) Cmnd->device->id
534 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
537 /*****************************************/
538 /* ISP Boards supported by this driver */
539 /*****************************************/
542 unsigned char name[9]; /* Board ID String */
543 int numPorts; /* Number of SCSI ports */
544 unsigned short *fwcode; /* pointer to FW array */
545 unsigned short *fwlen; /* number of words in array */
546 unsigned short *fwstart; /* start address for F/W */
547 unsigned char *fwver; /* Ptr to F/W version array */
550 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
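/* For example, the ISP1280 entry below carries driver_data 4, which
 * selects the "QLA1280" row of ql1280_board_tbl[]. */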
551 static struct pci_device_id qla1280_pci_tbl[] = {
552 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
553 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
554 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
555 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
556 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
557 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
558 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
559 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
560 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
561 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
562 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
563 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
566 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
568 static struct qla_boards ql1280_board_tbl[] = {
569 /* Name , Number of ports, FW details */
570 {"QLA12160", 2, &fw12160i_code01[0], &fw12160i_length01,
571 &fw12160i_addr01, &fw12160i_version_str[0]},
572 {"QLA1040", 1, &risc_code01[0], &risc_code_length01,
573 &risc_code_addr01, &firmware_version[0]},
574 {"QLA1080", 1, &fw1280ei_code01[0], &fw1280ei_length01,
575 &fw1280ei_addr01, &fw1280ei_version_str[0]},
576 {"QLA1240", 2, &fw1280ei_code01[0], &fw1280ei_length01,
577 &fw1280ei_addr01, &fw1280ei_version_str[0]},
578 {"QLA1280", 2, &fw1280ei_code01[0], &fw1280ei_length01,
579 &fw1280ei_addr01, &fw1280ei_version_str[0]},
580 {"QLA10160", 1, &fw12160i_code01[0], &fw12160i_length01,
581 &fw12160i_addr01, &fw12160i_version_str[0]},
585 static int qla1280_verbose = 1;
588 static int ql_debug_level = 1;
589 #define dprintk(level, format, a...) \
590 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
591 #define qla1280_dump_buffer(level, buf, size) \
592 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
593 #define qla1280_print_scsi_cmd(level, cmd) \
594 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
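/* dprintk(level, ...) only prints when ql_debug_level is at least that
 * level; the ENTER()/LEAVE() macros below log at level 3 and the
 * *_INTR variants at level 4. */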
596 #define ql_debug_level 0
597 #define dprintk(level, format, a...) do{}while(0)
598 #define qla1280_dump_buffer(a, b, c) do{}while(0)
599 #define qla1280_print_scsi_cmd(a, b) do{}while(0)
602 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
603 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
604 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
605 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
608 static int qla1280_read_nvram(struct scsi_qla_host *ha)
615 ENTER("qla1280_read_nvram");
617 if (driver_setup.no_nvram)
620 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
622 wptr = (uint16_t *)&ha->nvram;
625 for (cnt = 0; cnt < 3; cnt++) {
626 *wptr = qla1280_get_nvram_word(ha, cnt);
627 chksum += *wptr & 0xff;
628 chksum += (*wptr >> 8) & 0xff;
632 if (nv->id0 != 'I' || nv->id1 != 'S' ||
633 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
634 dprintk(2, "Invalid nvram ID or version!\n");
637 for (; cnt < sizeof(struct nvram); cnt++) {
638 *wptr = qla1280_get_nvram_word(ha, cnt);
639 chksum += *wptr & 0xff;
640 chksum += (*wptr >> 8) & 0xff;
645 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
646 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
651 if (!driver_setup.no_nvram)
652 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
653 "validate NVRAM checksum, using default "
654 "settings\n", ha->host_no);
659 /* The firmware interface is, um, interesting, in that the
660 * actual firmware image on the chip is little endian, thus,
661 * the process of taking that image to the CPU would end up
662 * little endian. However, the firmware interface requires it
663 * to be read a word (two bytes) at a time.
665 * The net result of this would be that the word (and
666 * doubleword) quantities in the firmware would be correct, but
667 * the bytes would be pairwise reversed. Since most of the
668 * firmware quantities are, in fact, bytes, we do an extra
669 * le16_to_cpu() in the firmware read routine.
671 * The upshot of all this is that the bytes in the firmware
672 * are in the correct places, but the 16 and 32 bit quantities
673 * are still in little endian format. We fix that up below by
674 * doing extra reverses on them */
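/* For example, the single-byte id fields checked above come out in the
 * right order on any CPU, while a 16-bit field such as isp_parameter
 * is still in little endian layout until the cpu_to_le16() fixups
 * below. */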
675 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
676 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
677 for(i = 0; i < MAX_BUSES; i++) {
678 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
679 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
681 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
682 LEAVE("qla1280_read_nvram");
687 /**************************************************************************
689 * Return a string describing the driver.
690 **************************************************************************/
692 qla1280_info(struct Scsi_Host *host)
694 static char qla1280_scsi_name_buffer[125];
696 struct scsi_qla_host *ha;
697 struct qla_boards *bdp;
699 bp = &qla1280_scsi_name_buffer[0];
700 ha = (struct scsi_qla_host *)host->hostdata;
701 bdp = &ql1280_board_tbl[ha->devnum];
702 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
705 "QLogic %s PCI to SCSI Host Adapter\n"
706 " Firmware version: %2d.%02d.%02d, Driver version %s",
707 &bdp->name[0], bdp->fwver[0], bdp->fwver[1], bdp->fwver[2],
712 /**************************************************************************
713 * qla1280_queuecommand
714 * Queue a command to the controller.
717 * The mid-level driver tries to ensure that queuecommand never gets invoked
718 * concurrently with itself or the interrupt handler (although the
719 * interrupt handler may call this routine as part of request-completion
720 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
721 * context which is a big NO! NO!.
722 **************************************************************************/
724 qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
726 struct Scsi_Host *host = cmd->device->host;
727 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
728 struct srb *sp = (struct srb *)&cmd->SCp;
735 qla1280_print_scsi_cmd(5, cmd);
739 * Using 64 bit commands if the PCI bridge doesn't support it is a
740 * bit wasteful, however this should really only happen if one's
741 * PCI controller is completely broken, like the BCM1250. For
742 * sane hardware this is not an issue.
744 status = qla1280_64bit_start_scsi(ha, sp);
746 status = qla1280_32bit_start_scsi(ha, sp);
760 /* timer action for error action processor */
761 static void qla1280_error_wait_timeout(unsigned long __data)
763 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
764 struct srb *sp = (struct srb *)CMD_SP(cmd);
769 static void qla1280_mailbox_timeout(unsigned long __data)
771 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
772 struct device_reg __iomem *reg;
775 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
776 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
777 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
778 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
779 complete(ha->mailbox_wait);
782 /**************************************************************************
783 * qla1280_error_action
784 * The function will attempt to perform a specified error action and
785 * wait for the results (or time out).
788 * cmd = Linux SCSI command packet of the command that caused the
790 * action = error action to take (see action_t)
796 * Resetting the bus always succeeds - it has to, otherwise the
797 * kernel will panic! Try a surgical technique - sending a BUS
798 * DEVICE RESET message - on the offending target before pulling
799 * the SCSI bus reset line.
800 **************************************************************************/
802 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
804 struct scsi_qla_host *ha;
805 int bus, target, lun;
808 unsigned char *handle;
810 DECLARE_COMPLETION_ONSTACK(wait);
811 struct timer_list timer;
813 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
815 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
816 RD_REG_WORD(&ha->iobase->istatus));
818 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
819 RD_REG_WORD(&ha->iobase->host_cmd),
820 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
822 ENTER("qla1280_error_action");
824 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
825 "Handle=0x%p, action=0x%x\n",
826 ha->host_no, cmd, CMD_HANDLE(cmd), action);
829 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
830 "si_Cmnd pointer, failing.\n");
831 LEAVE("qla1280_error_action");
835 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
836 sp = (struct srb *)CMD_SP(cmd);
837 handle = CMD_HANDLE(cmd);
839 /* Check for pending interrupts. */
840 data = qla1280_debounce_register(&ha->iobase->istatus);
842 * The io_request_lock is held when the reset handler is called, hence
843 * the interrupt handler cannot be running in parallel as it also
844 * grabs the lock. /Jes
847 qla1280_isr(ha, &ha->done_q);
850 * Determine the suggested action that the mid-level driver wants
853 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) {
854 if(action == ABORT_COMMAND) {
855 /* we never got this command */
856 printk(KERN_INFO "qla1280: Aborting a NULL handle\n");
857 return SUCCESS; /* no action - we don't have command */
863 bus = SCSI_BUS_32(cmd);
864 target = SCSI_TCN_32(cmd);
865 lun = SCSI_LUN_32(cmd);
867 /* Overloading result. Here it means the success or failure of the
868 * *issue* of the action. When we return from the routine, it must
869 * mean the actual success or failure of the action */
876 if ((sp->flags & SRB_ABORT_PENDING)) {
878 "scsi(): Command has a pending abort "
879 "message - ABORT_PENDING.\n");
880 /* This should technically be impossible since we
881 * now wait for abort completion */
885 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
886 if (sp == ha->outstanding_cmds[i]) {
887 dprintk(1, "qla1280: RISC aborting command\n");
888 if (qla1280_abort_command(ha, sp, i) == 0)
892 * Since we don't know what might
893 * have happened to the command, it
894 * is unsafe to remove it from the
895 * device's queue at this point.
896 * Wait and let the escalation
897 * process take care of it.
900 "scsi(%li:%i:%i:%i): Unable"
901 " to abort command!\n",
902 ha->host_no, bus, target, lun);
911 "scsi(%ld:%d:%d:%d): Queueing abort device "
912 "command.\n", ha->host_no, bus, target, lun);
913 if (qla1280_abort_device(ha, bus, target, lun) == 0)
920 "scsi(%ld:%d:%d:%d): Queueing device reset "
921 "command.\n", ha->host_no, bus, target, lun);
922 if (qla1280_device_reset(ha, bus, target) == 0)
928 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
929 "reset.\n", ha->host_no, bus);
930 if (qla1280_bus_reset(ha, bus) == 0)
936 if (qla1280_verbose) {
938 "scsi(%ld): Issued ADAPTER RESET\n",
940 printk(KERN_INFO "scsi(%ld): I/O processing will "
941 "continue automatically\n", ha->host_no);
943 ha->flags.reset_active = 1;
945 * We restarted all of the commands automatically, so the
946 * mid-level code can expect completions momentarily.
948 if (qla1280_abort_isp(ha) == 0)
951 ha->flags.reset_active = 0;
954 if (!list_empty(&ha->done_q))
957 /* If we didn't manage to issue the action, or we have no
958 * command to wait for, exit here */
959 if (result == FAILED || handle == NULL ||
960 handle == (unsigned char *)INVALID_HANDLE) {
962 * Clear completion queue to avoid qla1280_done() trying
963 * to complete the command at a later stage after we
964 * have exited the current context
970 /* set up a timer just in case we're really jammed */
972 timer.expires = jiffies + 4*HZ;
973 timer.data = (unsigned long)cmd;
974 timer.function = qla1280_error_wait_timeout;
977 /* wait for the action to complete (or the timer to expire) */
978 spin_unlock_irq(ha->host->host_lock);
979 wait_for_completion(&wait);
980 del_timer_sync(&timer);
981 spin_lock_irq(ha->host->host_lock);
984 /* the only action we might get a fail for is abort */
985 if (action == ABORT_COMMAND) {
986 if(sp->flags & SRB_ABORTED)
993 dprintk(1, "RESET returning %d\n", result);
995 LEAVE("qla1280_error_action");
999 /**************************************************************************
1001 * Abort the specified SCSI command(s).
1002 **************************************************************************/
1004 qla1280_eh_abort(struct scsi_cmnd * cmd)
1008 spin_lock_irq(cmd->device->host->host_lock);
1009 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1010 spin_unlock_irq(cmd->device->host->host_lock);
1015 /**************************************************************************
1016 * qla1280_device_reset
1017 * Reset the specified SCSI device
1018 **************************************************************************/
1020 qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1024 spin_lock_irq(cmd->device->host->host_lock);
1025 rc = qla1280_error_action(cmd, DEVICE_RESET);
1026 spin_unlock_irq(cmd->device->host->host_lock);
1031 /**************************************************************************
1033 * Reset the specified bus.
1034 **************************************************************************/
1036 qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1040 spin_lock_irq(cmd->device->host->host_lock);
1041 rc = qla1280_error_action(cmd, BUS_RESET);
1042 spin_unlock_irq(cmd->device->host->host_lock);
1047 /**************************************************************************
1048 * qla1280_adapter_reset
1049 * Reset the specified adapter (both channels)
1050 **************************************************************************/
1052 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1056 spin_lock_irq(cmd->device->host->host_lock);
1057 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1058 spin_unlock_irq(cmd->device->host->host_lock);
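/* Standard BIOS-style disk geometry heuristic: try 64 heads x 32
 * sectors first and fall back to 255 x 63 for bigger disks. E.g. an
 * 8388608-sector (4 GB) disk gives 4096 cylinders at 64x32, which is
 * over the 1024 limit, so 255x63 is used instead, yielding 522
 * cylinders. */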
1064 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1065 sector_t capacity, int geom[])
1067 int heads, sectors, cylinders;
1069 heads = 64;
1070 sectors = 32;
1071 cylinders = (unsigned long)capacity / (heads * sectors);
1072 if (cylinders > 1024) {
1073 heads = 255;
1074 sectors = 63;
1075 cylinders = (unsigned long)capacity / (heads * sectors);
1076 /* if (cylinders > 1023)
1077 cylinders = 1023; */
1082 geom[2] = cylinders;
1088 /* disable risc and host interrupts */
1090 qla1280_disable_intrs(struct scsi_qla_host *ha)
1092 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1093 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1096 /* enable risc and host interrupts */
1098 qla1280_enable_intrs(struct scsi_qla_host *ha)
1100 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1101 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1104 /**************************************************************************
1105 * qla1280_intr_handler
1106 * Handles the H/W interrupt
1107 **************************************************************************/
1109 qla1280_intr_handler(int irq, void *dev_id)
1111 struct scsi_qla_host *ha;
1112 struct device_reg __iomem *reg;
1116 ENTER_INTR ("qla1280_intr_handler");
1117 ha = (struct scsi_qla_host *)dev_id;
1119 spin_lock(ha->host->host_lock);
1124 qla1280_disable_intrs(ha);
1126 data = qla1280_debounce_register(&reg->istatus);
1127 /* Check for pending interrupts. */
1128 if (data & RISC_INT) {
1129 qla1280_isr(ha, &ha->done_q);
1132 if (!list_empty(&ha->done_q))
1135 spin_unlock(ha->host->host_lock);
1137 qla1280_enable_intrs(ha);
1139 LEAVE_INTR("qla1280_intr_handler");
1140 return IRQ_RETVAL(handled);
1145 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1148 uint16_t mb[MAILBOX_REGISTER_COUNT];
1154 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1156 /* Set Target Parameters. */
1157 mb[0] = MBC_SET_TARGET_PARAMETERS;
1158 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1159 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1160 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1161 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1162 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1163 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1164 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1165 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1166 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
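/* The per-target parameter flags assembled above land in the high
 * byte of mb[2] (bits 8-15); on 1x160 chips bit 5 additionally
 * carries the PPR enable. */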
1168 if (IS_ISP1x160(ha)) {
1169 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1170 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1171 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1172 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1175 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1177 mb[3] |= nv->bus[bus].target[target].sync_period;
1179 status = qla1280_mailbox_command(ha, mr, mb);
1181 /* Set Device Queue Parameters. */
1182 for (lun = 0; lun < MAX_LUNS; lun++) {
1183 mb[0] = MBC_SET_DEVICE_QUEUE;
1184 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1186 mb[2] = nv->bus[bus].max_queue_depth;
1187 mb[3] = nv->bus[bus].target[target].execution_throttle;
1188 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1192 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1193 "qla1280_set_target_parameters() failed\n",
1194 ha->host_no, bus, target);
1199 /**************************************************************************
1200 * qla1280_slave_configure
1203 * Determines the queue depth for a given device. There are two ways
1204 * a queue depth can be obtained for a tagged queueing device. One
1205 * way is a driver default queue depth; if it is defined, then it is used
1207 * as the default queue depth. Otherwise, we use either 4 or 8 as the
1208 * default queue depth (dependent on the number of hardware SCBs).
1209 **************************************************************************/
1211 qla1280_slave_configure(struct scsi_device *device)
1213 struct scsi_qla_host *ha;
1214 int default_depth = 3;
1215 int bus = device->channel;
1216 int target = device->id;
1219 unsigned long flags;
1221 ha = (struct scsi_qla_host *)device->host->hostdata;
1224 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1227 if (device->tagged_supported &&
1228 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1229 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1230 ha->bus_settings[bus].hiwat);
1232 scsi_adjust_queue_depth(device, 0, default_depth);
1235 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1236 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1237 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1239 if (driver_setup.no_sync ||
1240 (driver_setup.sync_mask &&
1241 (~driver_setup.sync_mask & (1 << target))))
1242 nv->bus[bus].target[target].parameter.enable_sync = 0;
1243 if (driver_setup.no_wide ||
1244 (driver_setup.wide_mask &&
1245 (~driver_setup.wide_mask & (1 << target))))
1246 nv->bus[bus].target[target].parameter.enable_wide = 0;
1247 if (IS_ISP1x160(ha)) {
1248 if (driver_setup.no_ppr ||
1249 (driver_setup.ppr_mask &&
1250 (~driver_setup.ppr_mask & (1 << target))))
1251 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1254 spin_lock_irqsave(ha->host->host_lock, flags);
1255 if (nv->bus[bus].target[target].parameter.enable_sync)
1256 status = qla1280_set_target_parameters(ha, bus, target);
1257 qla1280_get_target_parameters(ha, device);
1258 spin_unlock_irqrestore(ha->host->host_lock, flags);
1265 * Process completed commands.
1268 * ha = adapter block pointer.
1271 qla1280_done(struct scsi_qla_host *ha)
1274 struct list_head *done_q;
1275 int bus, target, lun;
1276 struct scsi_cmnd *cmd;
1278 ENTER("qla1280_done");
1280 done_q = &ha->done_q;
1282 while (!list_empty(done_q)) {
1283 sp = list_entry(done_q->next, struct srb, list);
1285 list_del(&sp->list);
1288 bus = SCSI_BUS_32(cmd);
1289 target = SCSI_TCN_32(cmd);
1290 lun = SCSI_LUN_32(cmd);
1292 switch ((CMD_RESULT(cmd) >> 16)) {
1294 /* Issue marker command. */
1295 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1298 sp->flags &= ~SRB_ABORT_PENDING;
1299 sp->flags |= SRB_ABORTED;
1300 if (sp->flags & SRB_TIMEOUT)
1301 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1307 /* Release memory used for this I/O */
1308 scsi_dma_unmap(cmd);
1310 /* Call the mid-level driver interrupt handler */
1311 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1314 (*(cmd)->scsi_done)(cmd);
1316 if(sp->wait != NULL)
1319 LEAVE("qla1280_done");
1323 * Translates an ISP error to a Linux SCSI error
1326 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1328 int host_status = DID_ERROR;
1329 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1330 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1331 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1332 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1333 #if DEBUG_QLA1280_INTR
1334 static char *reason[] = {
1346 #endif /* DEBUG_QLA1280_INTR */
1348 ENTER("qla1280_return_status");
1350 #if DEBUG_QLA1280_INTR
1352 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1357 switch (comp_status) {
1359 host_status = DID_OK;
1363 if (!(state_flags & SF_GOT_BUS))
1364 host_status = DID_NO_CONNECT;
1365 else if (!(state_flags & SF_GOT_TARGET))
1366 host_status = DID_BAD_TARGET;
1367 else if (!(state_flags & SF_SENT_CDB))
1368 host_status = DID_ERROR;
1369 else if (!(state_flags & SF_TRANSFERRED_DATA))
1370 host_status = DID_ERROR;
1371 else if (!(state_flags & SF_GOT_STATUS))
1372 host_status = DID_ERROR;
1373 else if (!(state_flags & SF_GOT_SENSE))
1374 host_status = DID_ERROR;
1378 host_status = DID_RESET;
1382 host_status = DID_ABORT;
1386 host_status = DID_TIME_OUT;
1389 case CS_DATA_OVERRUN:
1390 dprintk(2, "Data overrun 0x%x\n", residual_length);
1391 dprintk(2, "qla1280_return_status: response packet data\n");
1392 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1393 host_status = DID_ERROR;
1396 case CS_DATA_UNDERRUN:
1397 if ((scsi_bufflen(cp) - residual_length) < cp->underflow) {
1399 printk(KERN_WARNING
1400 "scsi: Underflow detected - retrying "
1401 "command.\n");
1402 host_status = DID_ERROR;
1404 scsi_set_resid(cp, residual_length);
1405 host_status = DID_OK;
1410 host_status = DID_ERROR;
1414 #if DEBUG_QLA1280_INTR
1415 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1416 reason[host_status], scsi_status);
1419 LEAVE("qla1280_return_status");
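/* Pack the host byte above the SCSI status byte, in the format the
 * SCSI midlayer expects. */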
1421 return (scsi_status & 0xff) | (host_status << 16);
1424 /****************************************************************************/
1425 /* QLogic ISP1280 Hardware Support Functions. */
1426 /****************************************************************************/
1429 * qla1280_initialize_adapter
1433 * ha = adapter block pointer.
1438 static int __devinit
1439 qla1280_initialize_adapter(struct scsi_qla_host *ha)
1441 struct device_reg __iomem *reg;
1444 unsigned long flags;
1446 ENTER("qla1280_initialize_adapter");
1448 /* Clear adapter flags. */
1449 ha->flags.online = 0;
1450 ha->flags.disable_host_adapter = 0;
1451 ha->flags.reset_active = 0;
1452 ha->flags.abort_isp_active = 0;
1454 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1455 if (ia64_platform_is("sn2")) {
1456 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1457 "dual channel lockup workaround\n", ha->host_no);
1458 ha->flags.use_pci_vchannel = 1;
1459 driver_setup.no_nvram = 1;
1463 /* TODO: implement support for the 1040 nvram format */
1465 driver_setup.no_nvram = 1;
1467 dprintk(1, "Configure PCI space for adapter...\n");
1471 /* Ensure mailbox registers are free. */
1472 WRT_REG_WORD(&reg->semaphore, 0);
1473 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1474 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1475 RD_REG_WORD(&reg->host_cmd);
1477 if (qla1280_read_nvram(ha)) {
1478 dprintk(2, "qla1280_initialize_adapter: failed to read "
1483 * It's necessary to grab the spin here as qla1280_mailbox_command
1484 * needs to be able to drop the lock unconditionally to wait
1487 spin_lock_irqsave(ha->host->host_lock, flags);
1489 status = qla1280_load_firmware(ha);
1491 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1496 /* Setup adapter based on NVRAM parameters. */
1497 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1498 qla1280_nvram_config(ha);
1500 if (ha->flags.disable_host_adapter) {
1505 status = qla1280_init_rings(ha);
1509 /* Issue SCSI reset, if we can't reset twice then bus is dead */
1510 for (bus = 0; bus < ha->ports; bus++) {
1511 if (!ha->bus_settings[bus].disable_scsi_reset &&
1512 qla1280_bus_reset(ha, bus) &&
1513 qla1280_bus_reset(ha, bus))
1514 ha->bus_settings[bus].scsi_bus_dead = 1;
1517 ha->flags.online = 1;
1519 spin_unlock_irqrestore(ha->host->host_lock, flags);
1522 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1524 LEAVE("qla1280_initialize_adapter");
1530 * Test chip for proper operation.
1533 * ha = adapter block pointer.
1539 qla1280_chip_diag(struct scsi_qla_host *ha)
1541 uint16_t mb[MAILBOX_REGISTER_COUNT];
1542 struct device_reg __iomem *reg = ha->iobase;
1546 dprintk(3, "qla1280_chip_diag: testing device at 0x%p\n", &reg->id_l);
1548 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1550 /* Soft reset chip and wait for it to finish. */
1551 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1554 * We can't do a traditional PCI write flush here by reading
1555 * back the register. The card will not respond once the reset
1556 * is in action and we end up with a machine check exception
1557 * instead. Nothing to do but wait and hope for the best.
1558 * A portable pci_write_flush(pdev) call would be very useful here.
1561 data = qla1280_debounce_register(&reg->ictrl);
1563 * Yet another QLogic gem ;-(
1565 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1567 data = RD_REG_WORD(&reg->ictrl);
1573 /* Reset register cleared by chip reset. */
1574 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1576 WRT_REG_WORD(&reg->cfg_1, 0);
1578 /* Reset RISC and disable BIOS which
1579 allows RISC to execute out of RAM. */
1580 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1581 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1583 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
1584 data = qla1280_debounce_register(&reg->mailbox0);
1587 * I *LOVE* this code!
1589 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1591 data = RD_REG_WORD(&reg->mailbox0);
1597 /* Check product ID of chip */
1598 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1600 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1601 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1602 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1603 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1604 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1605 printk(KERN_INFO "qla1280: Wrong product ID = "
1606 "0x%x,0x%x,0x%x,0x%x\n",
1607 RD_REG_WORD(&reg->mailbox1),
1608 RD_REG_WORD(&reg->mailbox2),
1609 RD_REG_WORD(&reg->mailbox3),
1610 RD_REG_WORD(&reg->mailbox4));
1615 * Enable ints early!!!
1617 qla1280_enable_intrs(ha);
1619 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1620 /* Wrap Incoming Mailboxes Test. */
1621 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1630 status = qla1280_mailbox_command(ha, 0xff, mb);
1634 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1635 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1637 printk(KERN_INFO "qla1280: Failed mbox check\n");
1641 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1644 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1649 qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1651 uint16_t risc_address, *risc_code_address, risc_code_size;
1652 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1655 /* Load RISC code. */
1656 risc_address = *ql1280_board_tbl[ha->devnum].fwstart;
1657 risc_code_address = ql1280_board_tbl[ha->devnum].fwcode;
1658 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
1660 for (i = 0; i < risc_code_size; i++) {
1661 mb[0] = MBC_WRITE_RAM_WORD;
1662 mb[1] = risc_address + i;
1663 mb[2] = risc_code_address[i];
1665 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1667 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1676 #define DUMP_IT_BACK 0 /* for debug of RISC loading */
1678 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1680 uint16_t risc_address, *risc_code_address, risc_code_size;
1681 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1682 int err = 0, num, i;
1687 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1692 /* Load RISC code. */
1693 risc_address = *ql1280_board_tbl[ha->devnum].fwstart;
1694 risc_code_address = ql1280_board_tbl[ha->devnum].fwcode;
1695 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
1697 dprintk(1, "%s: DMA RISC code (%i) words\n",
1698 __FUNCTION__, risc_code_size);
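/* The firmware image is copied chunk by chunk into the (still unused)
 * request ring, which doubles as a DMA bounce buffer, and each chunk
 * is transferred to RISC RAM with an MBC_LOAD_RAM mailbox command. */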
1701 while (risc_code_size > 0) {
1702 int warn __attribute__((unused)) = 0;
1706 if (cnt > risc_code_size)
1707 cnt = risc_code_size;
1709 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1711 risc_code_address, cnt, num, risc_address);
1712 for(i = 0; i < cnt; i++)
1713 ((__le16 *)ha->request_ring)[i] =
1714 cpu_to_le16(risc_code_address[i]);
1716 mb[0] = MBC_LOAD_RAM;
1717 mb[1] = risc_address;
1719 mb[3] = ha->request_dma & 0xffff;
1720 mb[2] = (ha->request_dma >> 16) & 0xffff;
1721 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1722 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1723 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1724 __FUNCTION__, mb[0],
1725 (void *)(long)ha->request_dma,
1726 mb[6], mb[7], mb[2], mb[3]);
1727 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1730 printk(KERN_ERR "scsi(%li): Failed to load partial "
1731 "segment of f\n", ha->host_no);
1736 mb[0] = MBC_DUMP_RAM;
1737 mb[1] = risc_address;
1739 mb[3] = p_tbuf & 0xffff;
1740 mb[2] = (p_tbuf >> 16) & 0xffff;
1741 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1742 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1744 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1748 "Failed to dump partial segment of f/w\n");
1751 sp = (uint8_t *)ha->request_ring;
1752 for (i = 0; i < (cnt << 1); i++) {
1753 if (tbuf[i] != sp[i] && warn++ < 10) {
1754 printk(KERN_ERR "%s: FW compare error @ "
1755 "byte(0x%x) loop#=%x\n",
1756 __FUNCTION__, i, num);
1757 printk(KERN_ERR "%s: FWbyte=%x "
1759 __FUNCTION__, sp[i], tbuf[i]);
1764 risc_address += cnt;
1765 risc_code_size = risc_code_size - cnt;
1766 risc_code_address = risc_code_address + cnt;
1772 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1778 qla1280_start_firmware(struct scsi_qla_host *ha)
1780 uint16_t mb[MAILBOX_REGISTER_COUNT];
1783 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1786 /* Verify checksum of loaded RISC code. */
1787 mb[0] = MBC_VERIFY_CHECKSUM;
1788 /* mb[1] = ql12_risc_code_addr01; */
1789 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
1790 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1792 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1796 /* Start firmware execution. */
1797 dprintk(1, "%s: start firmware running.\n", __FUNCTION__);
1798 mb[0] = MBC_EXECUTE_FIRMWARE;
1799 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
1800 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1802 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1810 qla1280_load_firmware(struct scsi_qla_host *ha)
1814 err = qla1280_chip_diag(ha);
1818 err = qla1280_load_firmware_pio(ha);
1820 err = qla1280_load_firmware_dma(ha);
1823 err = qla1280_start_firmware(ha);
1832 * ha = adapter block pointer.
1833 * ha->request_ring = request ring virtual address
1834 * ha->response_ring = response ring virtual address
1835 * ha->request_dma = request ring physical address
1836 * ha->response_dma = response ring physical address
1842 qla1280_init_rings(struct scsi_qla_host *ha)
1844 uint16_t mb[MAILBOX_REGISTER_COUNT];
1847 ENTER("qla1280_init_rings");
1849 /* Clear outstanding commands array. */
1850 memset(ha->outstanding_cmds, 0,
1851 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1853 /* Initialize request queue. */
1854 ha->request_ring_ptr = ha->request_ring;
1855 ha->req_ring_index = 0;
1856 ha->req_q_cnt = REQUEST_ENTRY_CNT;
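/* The 64-bit ring base address is handed over in four 16-bit
 * mailboxes: mb[3]/mb[2] hold the low dword, mb[7]/mb[6] the high
 * dword. */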
1857 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1858 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1859 mb[1] = REQUEST_ENTRY_CNT;
1860 mb[3] = ha->request_dma & 0xffff;
1861 mb[2] = (ha->request_dma >> 16) & 0xffff;
1863 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1864 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1865 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1866 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1868 /* Initialize response queue. */
1869 ha->response_ring_ptr = ha->response_ring;
1870 ha->rsp_ring_index = 0;
1871 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1872 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1873 mb[1] = RESPONSE_ENTRY_CNT;
1874 mb[3] = ha->response_dma & 0xffff;
1875 mb[2] = (ha->response_dma >> 16) & 0xffff;
1877 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1878 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1879 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1880 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1885 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1887 LEAVE("qla1280_init_rings");
1892 qla1280_print_settings(struct nvram *nv)
1894 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1895 nv->bus[0].config_1.initiator_id);
1896 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1897 nv->bus[1].config_1.initiator_id);
1899 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1900 nv->bus[0].bus_reset_delay);
1901 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1902 nv->bus[1].bus_reset_delay);
1904 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1905 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1906 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1907 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1909 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1910 nv->bus[0].config_2.async_data_setup_time);
1911 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1912 nv->bus[1].config_2.async_data_setup_time);
1914 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1915 nv->bus[0].config_2.req_ack_active_negation);
1916 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1917 nv->bus[1].config_2.req_ack_active_negation);
1919 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1920 nv->bus[0].config_2.data_line_active_negation);
1921 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1922 nv->bus[1].config_2.data_line_active_negation);
1924 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1925 nv->cntr_flags_1.disable_loading_risc_code);
1927 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
1928 nv->cntr_flags_1.enable_64bit_addressing);
1930 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
1931 nv->bus[0].selection_timeout);
1932 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
1933 nv->bus[1].selection_timeout);
1935 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
1936 nv->bus[0].max_queue_depth);
1937 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
1938 nv->bus[1].max_queue_depth);
1942 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
1944 struct nvram *nv = &ha->nvram;
1946 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
1947 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
1948 nv->bus[bus].target[target].parameter.tag_queuing = 1;
1949 nv->bus[bus].target[target].parameter.enable_sync = 1;
1950 #if 1 /* Some SCSI Processors do not seem to like this */
1951 nv->bus[bus].target[target].parameter.enable_wide = 1;
1953 nv->bus[bus].target[target].execution_throttle =
1954 nv->bus[bus].max_queue_depth - 1;
1955 nv->bus[bus].target[target].parameter.parity_checking = 1;
1956 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
1958 if (IS_ISP1x160(ha)) {
1959 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
1960 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
1961 nv->bus[bus].target[target].sync_period = 9;
1962 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
1963 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
1964 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
1966 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
1967 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
1968 nv->bus[bus].target[target].sync_period = 10;
1973 qla1280_set_defaults(struct scsi_qla_host *ha)
1975 struct nvram *nv = &ha->nvram;
1978 dprintk(1, "Using defaults for NVRAM: \n");
1979 memset(nv, 0, sizeof(struct nvram));
1981 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
1982 nv->firmware_feature.f.enable_fast_posting = 1;
1983 nv->firmware_feature.f.disable_synchronous_backoff = 1;
1984 nv->termination.scsi_bus_0_control = 3;
1985 nv->termination.scsi_bus_1_control = 3;
1986 nv->termination.auto_term_support = 1;
1989 * Set default FIFO magic - What appropriate values would be here
1990 * is unknown. This is what I have found testing with 12160s.
1992 * Now, I would love the magic decoder ring for this one, the
1993 * header file provided by QLogic seems to be bogus or incomplete
1996 nv->isp_config.burst_enable = 1;
1998 nv->isp_config.fifo_threshold |= 3;
2000 nv->isp_config.fifo_threshold |= 4;
2002 if (IS_ISP1x160(ha))
2003 nv->isp_parameter = 0x01; /* fast memory enable */
2005 for (bus = 0; bus < MAX_BUSES; bus++) {
2006 nv->bus[bus].config_1.initiator_id = 7;
2007 nv->bus[bus].config_2.req_ack_active_negation = 1;
2008 nv->bus[bus].config_2.data_line_active_negation = 1;
2009 nv->bus[bus].selection_timeout = 250;
2010 nv->bus[bus].max_queue_depth = 32;
2012 if (IS_ISP1040(ha)) {
2013 nv->bus[bus].bus_reset_delay = 3;
2014 nv->bus[bus].config_2.async_data_setup_time = 6;
2015 nv->bus[bus].retry_delay = 1;
2017 nv->bus[bus].bus_reset_delay = 5;
2018 nv->bus[bus].config_2.async_data_setup_time = 8;
2021 for (target = 0; target < MAX_TARGETS; target++)
2022 qla1280_set_target_defaults(ha, bus, target);
2027 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2029 struct nvram *nv = &ha->nvram;
2030 uint16_t mb[MAILBOX_REGISTER_COUNT];
2034 /* Set Target Parameters. */
2035 mb[0] = MBC_SET_TARGET_PARAMETERS;
2036 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
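/* The target ID rides in the upper byte of mb[1]; BIT_7 is ORed in
 * to address targets on the second bus of dual-channel boards. */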
2039 * Do not enable sync and ppr for the initial INQUIRY run. We
2040 * enable this later if we determine the target actually
2043 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2044 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2046 if (IS_ISP1x160(ha))
2047 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2049 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2050 mb[3] |= nv->bus[bus].target[target].sync_period;
2051 status = qla1280_mailbox_command(ha, 0x0f, mb);
2053 /* Save Tag queuing enable flag. */
2054 flag = (BIT_0 << target);
2055 if (nv->bus[bus].target[target].parameter.tag_queuing)
2056 ha->bus_settings[bus].qtag_enables |= flag;
2058 /* Save Device enable flag. */
2059 if (IS_ISP1x160(ha)) {
2060 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2061 ha->bus_settings[bus].device_enables |= flag;
2062 ha->bus_settings[bus].lun_disables |= 0;
2064 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2065 ha->bus_settings[bus].device_enables |= flag;
2066 /* Save LUN disable flag. */
2067 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2068 ha->bus_settings[bus].lun_disables |= flag;
2071 /* Set Device Queue Parameters. */
2072 for (lun = 0; lun < MAX_LUNS; lun++) {
2073 mb[0] = MBC_SET_DEVICE_QUEUE;
2074 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2076 mb[2] = nv->bus[bus].max_queue_depth;
2077 mb[3] = nv->bus[bus].target[target].execution_throttle;
2078 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2085 qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2087 struct nvram *nv = &ha->nvram;
2088 uint16_t mb[MAILBOX_REGISTER_COUNT];
2091 /* SCSI Reset Disable. */
2092 ha->bus_settings[bus].disable_scsi_reset =
2093 nv->bus[bus].config_1.scsi_reset_disable;
2096 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2097 mb[0] = MBC_SET_INITIATOR_ID;
2098 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2099 ha->bus_settings[bus].id;
2100 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2103 ha->bus_settings[bus].bus_reset_delay =
2104 nv->bus[bus].bus_reset_delay;
2106 /* Command queue depth per device. */
2107 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2109 /* Set target parameters. */
2110 for (target = 0; target < MAX_TARGETS; target++)
2111 status |= qla1280_config_target(ha, bus, target);
2117 qla1280_nvram_config(struct scsi_qla_host *ha)
2119 struct device_reg __iomem *reg = ha->iobase;
2120 struct nvram *nv = &ha->nvram;
2121 int bus, target, status = 0;
2122 uint16_t mb[MAILBOX_REGISTER_COUNT];
2124 ENTER("qla1280_nvram_config");
2126 if (ha->nvram_valid) {
2127 /* Always force AUTO sense for LINUX SCSI */
2128 for (bus = 0; bus < MAX_BUSES; bus++)
2129 for (target = 0; target < MAX_TARGETS; target++) {
2130 nv->bus[bus].target[target].parameter.
2131 auto_request_sense = 1;
2134 qla1280_set_defaults(ha);
2137 qla1280_print_settings(nv);
2139 /* Disable RISC load of firmware. */
2140 ha->flags.disable_risc_code_load =
2141 nv->cntr_flags_1.disable_loading_risc_code;
2143 if (IS_ISP1040(ha)) {
2144 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2146 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2148 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2149 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2150 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2152 /* Busted fifo, says mjacob. */
2153 if (hwrev != ISP_CFG0_1040A)
2154 cfg1 |= nv->isp_config.fifo_threshold << 4;
2156 cfg1 |= nv->isp_config.burst_enable << 2;
2157 WRT_REG_WORD(&reg->cfg_1, cfg1);
2159 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2160 WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2162 uint16_t cfg1, term;
2164 /* Set ISP hardware DMA burst */
2165 cfg1 = nv->isp_config.fifo_threshold << 4;
2166 cfg1 |= nv->isp_config.burst_enable << 2;
2167 /* Enable DMA arbitration on dual channel controllers */
2170 WRT_REG_WORD(&reg->cfg_1, cfg1);
2172 /* Set SCSI termination. */
2173 WRT_REG_WORD(&reg->gpio_enable,
2174 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2175 term = nv->termination.scsi_bus_1_control;
2176 term |= nv->termination.scsi_bus_0_control << 2;
2177 term |= nv->termination.auto_term_support << 7;
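/* The resulting GPIO data word carries bus 1 termination control in
 * bits 1:0, bus 0 termination control in bits 3:2 and the
 * auto-termination enable in bit 7. */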
2178 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2179 WRT_REG_WORD(&reg->gpio_data, term);
2181 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2183 /* ISP parameter word. */
2184 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2185 mb[1] = nv->isp_parameter;
2186 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2188 if (IS_ISP1x40(ha)) {
2189 /* clock rate - for qla1240 and older, only */
2190 mb[0] = MBC_SET_CLOCK_RATE;
2192 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2195 /* Firmware feature word. */
2196 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2197 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2198 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2199 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2200 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2201 if (ia64_platform_is("sn2")) {
2202 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2203 "workaround\n", ha->host_no);
2204 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2207 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2209 /* Retry count and delay. */
2210 mb[0] = MBC_SET_RETRY_COUNT;
2211 mb[1] = nv->bus[0].retry_count;
2212 mb[2] = nv->bus[0].retry_delay;
2213 mb[6] = nv->bus[1].retry_count;
2214 mb[7] = nv->bus[1].retry_delay;
2215 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2216 BIT_1 | BIT_0, &mb[0]);
2218 /* ASYNC data setup time. */
2219 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2220 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2221 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2222 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2224 /* Active negation states. */
2225 mb[0] = MBC_SET_ACTIVE_NEGATION;
2227 if (nv->bus[0].config_2.req_ack_active_negation)
2229 if (nv->bus[0].config_2.data_line_active_negation)
2232 if (nv->bus[1].config_2.req_ack_active_negation)
2234 if (nv->bus[1].config_2.data_line_active_negation)
2236 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2238 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2239 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2240 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2243 mb[0] = MBC_SET_PCI_CONTROL;
2244 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2245 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2246 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2248 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2250 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2252 /* Selection timeout. */
2253 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2254 mb[1] = nv->bus[0].selection_timeout;
2255 mb[2] = nv->bus[1].selection_timeout;
2256 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2258 for (bus = 0; bus < ha->ports; bus++)
2259 status |= qla1280_config_bus(ha, bus);
2262 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2264 LEAVE("qla1280_nvram_config");
2269 * Get NVRAM data word
2270 * Calculates word position in NVRAM and calls request routine to
2271 * get the word from NVRAM.
2274 * ha = adapter block pointer.
2275 * address = NVRAM word address.
2281 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2286 nv_cmd = address << 16;
2287 nv_cmd |= NV_READ_OP;
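/* Example (illustrative): reading NVRAM word 0x12 builds
 * nv_cmd = (0x12 << 16) | NV_READ_OP, i.e. the word address in bits
 * 23:16 with NV_READ_OP supplying the start bit and read opcode. */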
2289 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2291 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2299 * Sends read command to NVRAM and gets data from NVRAM.
2302 * ha = adapter block pointer.
2303 * nv_cmd = Bit 26 = start bit
2304 * Bit 25, 24 = opcode
2305 * Bit 23-16 = address
2306 * Bit 15-0 = write data
2312 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2314 struct device_reg __iomem *reg = ha->iobase;
2319 /* Send command to NVRAM. */
2322 for (cnt = 0; cnt < 11; cnt++) {
2323 if (nv_cmd & BIT_31)
2324 qla1280_nv_write(ha, NV_DATA_OUT);
2326 qla1280_nv_write(ha, 0);
2330 /* Read data from NVRAM. */
2332 for (cnt = 0; cnt < 16; cnt++) {
2333 WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2334 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2337 reg_data = RD_REG_WORD(&reg->nvram);
2338 if (reg_data & NV_DATA_IN)
2340 WRT_REG_WORD(&reg->nvram, NV_SELECT);
2341 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2345 /* Deselect chip. */
2347 WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2348 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2355 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2357 struct device_reg __iomem *reg = ha->iobase;
2359 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2360 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2362 WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2363 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2365 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2366 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2372 * Issue mailbox command and waits for completion.
2375 * ha = adapter block pointer.
2376 * mr = mailbox registers to load.
2377 * mb = data pointer for mailbox registers.
2380 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2386 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2388 struct device_reg __iomem *reg = ha->iobase;
2394 uint16_t *optr, *iptr;
2395 uint16_t __iomem *mptr;
2397 DECLARE_COMPLETION_ONSTACK(wait);
2398 struct timer_list timer;
2400 ENTER("qla1280_mailbox_command");
2402 if (ha->mailbox_wait) {
2403 printk(KERN_ERR "Warning: mailbox wait already in use!\n");
2405 ha->mailbox_wait = &wait;
2408 * We really should start out by verifying that the mailbox is
2409 * available before we start sending the command data
2411 /* Load mailbox registers. */
2412 mptr = (uint16_t __iomem *) &reg->mailbox0;
2414 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2416 WRT_REG_WORD(mptr, (*iptr));
2424 /* Issue set host interrupt command. */
2426 /* set up a timer just in case we're really jammed */
2428 timer.expires = jiffies + 20*HZ;
2429 timer.data = (unsigned long)ha;
2430 timer.function = qla1280_mailbox_timeout;
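/* If the mailbox completion interrupt never arrives, the 20 second
 * timer above is expected to fire qla1280_mailbox_timeout and
 * complete the wait below, so this path does not block forever. */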
2433 spin_unlock_irq(ha->host->host_lock);
2434 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2435 data = qla1280_debounce_register(&reg->istatus);
2437 wait_for_completion(&wait);
2438 del_timer_sync(&timer);
2440 spin_lock_irq(ha->host->host_lock);
2442 ha->mailbox_wait = NULL;
2444 /* Check for mailbox command timeout. */
2445 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2446 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2447 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2449 mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2450 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2451 RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2452 RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2453 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2454 RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2455 RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2459 /* Load return mailbox registers. */
2461 iptr = (uint16_t *) &ha->mailbox_out[0];
2462 mr = MAILBOX_REGISTER_COUNT;
2463 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2466 /* Go check for any response interrupts pending. */
2467 qla1280_isr(ha, &done_q);
2470 if (ha->flags.reset_marker)
2471 qla1280_rst_aen(ha);
2474 if (!list_empty(&done_q))
2475 qla1280_done(ha, &done_q);
2479 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2480 "0x%x ****\n", mb[0]);
2482 LEAVE("qla1280_mailbox_command");
2488 * Polls ISP for interrupts.
2491 * ha = adapter block pointer.
2494 qla1280_poll(struct scsi_qla_host *ha)
2496 struct device_reg __iomem *reg = ha->iobase;
2500 /* ENTER("qla1280_poll"); */
2502 /* Check for pending interrupts. */
2503 data = RD_REG_WORD(&reg->istatus);
2504 if (data & RISC_INT)
2505 qla1280_isr(ha, &done_q);
2507 if (!ha->mailbox_wait) {
2508 if (ha->flags.reset_marker)
2509 qla1280_rst_aen(ha);
2512 if (!list_empty(&done_q))
2515 /* LEAVE("qla1280_poll"); */
2520 * Issue SCSI bus reset.
2523 * ha = adapter block pointer.
2524 * bus = SCSI bus number.
2530 qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2532 uint16_t mb[MAILBOX_REGISTER_COUNT];
2533 uint16_t reset_delay;
2536 dprintk(3, "qla1280_bus_reset: entered\n");
2538 if (qla1280_verbose)
2539 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2542 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2543 mb[0] = MBC_BUS_RESET;
2544 mb[1] = reset_delay;
2545 mb[2] = (uint16_t) bus;
2546 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2549 if (ha->bus_settings[bus].failed_reset_count > 2)
2550 ha->bus_settings[bus].scsi_bus_dead = 1;
2551 ha->bus_settings[bus].failed_reset_count++;
2553 spin_unlock_irq(ha->host->host_lock);
2554 ssleep(reset_delay);
2555 spin_lock_irq(ha->host->host_lock);
2557 ha->bus_settings[bus].scsi_bus_dead = 0;
2558 ha->bus_settings[bus].failed_reset_count = 0;
2559 ha->bus_settings[bus].reset_marker = 0;
2560 /* Issue marker command. */
2561 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2565 * We should probably call qla1280_set_target_parameters()
2566 * here as well for all devices on the bus.
2570 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2572 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2578 * qla1280_device_reset
2579 * Issue bus device reset message to the target.
2582 * ha = adapter block pointer.
2583 * bus = SCSI BUS number.
2590 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2592 uint16_t mb[MAILBOX_REGISTER_COUNT];
2595 ENTER("qla1280_device_reset");
2597 mb[0] = MBC_ABORT_TARGET;
2598 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2600 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2602 /* Issue marker command. */
2603 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2606 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2608 LEAVE("qla1280_device_reset");
2613 * qla1280_abort_device
2614 * Issue an abort message to the device
2617 * ha = adapter block pointer.
2626 qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2628 uint16_t mb[MAILBOX_REGISTER_COUNT];
2631 ENTER("qla1280_abort_device");
2633 mb[0] = MBC_ABORT_DEVICE;
2634 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2635 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2637 /* Issue marker command. */
2638 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2641 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2643 LEAVE("qla1280_abort_device");
2648 * qla1280_abort_command
2649 * Abort command aborts a specified IOCB.
2652 * ha = adapter block pointer.
2653 * sp = SB structure pointer.
2659 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2661 uint16_t mb[MAILBOX_REGISTER_COUNT];
2662 unsigned int bus, target, lun;
2665 ENTER("qla1280_abort_command");
2667 bus = SCSI_BUS_32(sp->cmd);
2668 target = SCSI_TCN_32(sp->cmd);
2669 lun = SCSI_LUN_32(sp->cmd);
2671 sp->flags |= SRB_ABORT_PENDING;
2673 mb[0] = MBC_ABORT_COMMAND;
2674 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2675 mb[2] = handle >> 16;
2676 mb[3] = handle & 0xffff;
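/* The 32-bit IOCB handle is passed split across mb[2] (upper word)
 * and mb[3] (lower word). */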
2677 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2680 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2681 sp->flags &= ~SRB_ABORT_PENDING;
2685 LEAVE("qla1280_abort_command");
2690 * qla1280_reset_adapter
2694 * ha = adapter block pointer.
2697 qla1280_reset_adapter(struct scsi_qla_host *ha)
2699 struct device_reg __iomem *reg = ha->iobase;
2701 ENTER("qla1280_reset_adapter");
2703 /* Disable ISP chip */
2704 ha->flags.online = 0;
2705 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2706 WRT_REG_WORD(&reg->host_cmd,
2707 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2708 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2710 LEAVE("qla1280_reset_adapter");
2714 * Issue marker command.
2715 * Function issues marker IOCB.
2718 * ha = adapter block pointer.
2719 * bus = SCSI BUS number
2722 * type = marker modifier
2725 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2727 struct mrk_entry *pkt;
2729 ENTER("qla1280_marker");
2731 /* Get request packet. */
2732 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2733 pkt->entry_type = MARKER_TYPE;
2734 pkt->lun = (uint8_t) lun;
2735 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2736 pkt->modifier = type;
2737 pkt->entry_status = 0;
2739 /* Issue command to ISP */
2740 qla1280_isp_cmd(ha);
2743 LEAVE("qla1280_marker");
2748 * qla1280_64bit_start_scsi
2749 * The start SCSI is responsible for building request packets on
2750 * request ring and modifying ISP input pointer.
2753 * ha = adapter block pointer.
2754 * sp = SB structure pointer.
2757 * 0 = success, was able to issue command.
2759 #ifdef QLA_64BIT_PTR
2761 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2763 struct device_reg __iomem *reg = ha->iobase;
2764 struct scsi_cmnd *cmd = sp->cmd;
2765 cmd_a64_entry_t *pkt;
2767 dma_addr_t dma_handle;
2774 ENTER("qla1280_64bit_start_scsi:");
2776 /* Calculate number of entries and segments required. */
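/* Descriptive note: a 64-bit command IOCB carries up to 2 S/G
 * descriptors and each continuation IOCB a further 5, which is what
 * the (seg_cnt - 2) / 5 arithmetic below accounts for. */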
2778 seg_cnt = scsi_dma_map(cmd);
2781 req_cnt += (seg_cnt - 2) / 5;
2782 if ((seg_cnt - 2) % 5)
2785 } else if (seg_cnt < 0) {
2790 if ((req_cnt + 2) >= ha->req_q_cnt) {
2791 /* Calculate number of free request entries. */
2792 cnt = RD_REG_WORD(&reg->mailbox4);
2793 if (ha->req_ring_index < cnt)
2794 ha->req_q_cnt = cnt - ha->req_ring_index;
2797 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2800 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2801 ha->req_q_cnt, seg_cnt);
2803 /* If room for request in request ring. */
2804 if ((req_cnt + 2) >= ha->req_q_cnt) {
2806 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2807 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2812 /* Check for room in outstanding command list. */
2813 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2814 ha->outstanding_cmds[cnt] != NULL; cnt++);
2816 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2818 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2819 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2823 ha->outstanding_cmds[cnt] = sp;
2824 ha->req_q_cnt -= req_cnt;
2825 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2827 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2828 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2829 dprintk(2, " bus %i, target %i, lun %i\n",
2830 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2831 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2834 * Build command packet.
2836 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2838 pkt->entry_type = COMMAND_A64_TYPE;
2839 pkt->entry_count = (uint8_t) req_cnt;
2840 pkt->sys_define = (uint8_t) ha->req_ring_index;
2841 pkt->entry_status = 0;
2842 pkt->handle = cpu_to_le32(cnt);
2844 /* Zero out remaining portion of packet. */
2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2847 /* Set ISP command timeout. */
2848 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
2850 /* Set device target ID and LUN */
2851 pkt->lun = SCSI_LUN_32(cmd);
2852 pkt->target = SCSI_BUS_32(cmd) ?
2853 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2855 /* Enable simple tag queuing if device supports it. */
2856 if (cmd->device->simple_tags)
2857 pkt->control_flags |= cpu_to_le16(BIT_3);
2859 /* Load SCSI command packet. */
2860 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2861 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2862 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2864 /* Set transfer direction. */
2865 dir = qla1280_data_direction(cmd);
2866 pkt->control_flags |= cpu_to_le16(dir);
2868 /* Set total data segment count. */
2869 pkt->dseg_count = cpu_to_le16(seg_cnt);
2872 * Load data segments.
2874 if (seg_cnt) { /* If data transfer. */
2875 struct scatterlist *sg, *s;
2876 int remseg = seg_cnt;
2878 sg = scsi_sglist(cmd);
2880 /* Setup packet address segment pointer. */
2881 dword_ptr = (u32 *)&pkt->dseg_0_address;
2883 /* Load command entry data segments. */
2884 for_each_sg(sg, s, seg_cnt, cnt) {
2888 dma_handle = sg_dma_address(s);
2889 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2890 if (ha->flags.use_pci_vchannel)
2891 sn_pci_set_vchan(ha->pdev,
2892 (unsigned long *)&dma_handle,
2896 cpu_to_le32(pci_dma_lo32(dma_handle));
2898 cpu_to_le32(pci_dma_hi32(dma_handle));
2899 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2900 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2901 cpu_to_le32(pci_dma_hi32(dma_handle)),
2902 cpu_to_le32(pci_dma_lo32(dma_handle)),
2903 cpu_to_le32(sg_dma_len(s)));
2906 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2907 "command packet data - b %i, t %i, l %i \n",
2908 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2910 qla1280_dump_buffer(5, (char *)pkt,
2911 REQUEST_ENTRY_SIZE);
2914 * Build continuation packets.
2916 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2917 "remains\n", seg_cnt);
2919 while (remseg > 0) {
2920 /* Update sg start */
2922 /* Adjust ring index. */
2923 ha->req_ring_index++;
2924 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2925 ha->req_ring_index = 0;
2926 ha->request_ring_ptr =
2929 ha->request_ring_ptr++;
2931 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2933 /* Zero out packet. */
2934 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2936 /* Load packet defaults. */
2937 ((struct cont_a64_entry *) pkt)->entry_type =
2939 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2940 ((struct cont_a64_entry *) pkt)->sys_define =
2941 (uint8_t)ha->req_ring_index;
2942 /* Setup packet address segment pointer. */
2944 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2946 /* Load continuation entry data segments. */
2947 for_each_sg(sg, s, remseg, cnt) {
2950 dma_handle = sg_dma_address(s);
2951 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2952 if (ha->flags.use_pci_vchannel)
2953 sn_pci_set_vchan(ha->pdev,
2954 (unsigned long *)&dma_handle,
2958 cpu_to_le32(pci_dma_lo32(dma_handle));
2960 cpu_to_le32(pci_dma_hi32(dma_handle));
2962 cpu_to_le32(sg_dma_len(s));
2963 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2964 cpu_to_le32(pci_dma_hi32(dma_handle)),
2965 cpu_to_le32(pci_dma_lo32(dma_handle)),
2966 cpu_to_le32(sg_dma_len(s)));
2969 dprintk(5, "qla1280_64bit_start_scsi: "
2970 "continuation packet data - b %i, t "
2971 "%i, l %i \n", SCSI_BUS_32(cmd),
2972 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2973 qla1280_dump_buffer(5, (char *)pkt,
2974 REQUEST_ENTRY_SIZE);
2976 } else { /* No data transfer */
2977 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
2978 "packet data - b %i, t %i, l %i \n",
2979 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2980 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
2982 /* Adjust ring index. */
2983 ha->req_ring_index++;
2984 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2985 ha->req_ring_index = 0;
2986 ha->request_ring_ptr = ha->request_ring;
2988 ha->request_ring_ptr++;
2990 /* Set chip new ring index. */
2992 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
2993 sp->flags |= SRB_SENT;
2995 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
2996 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3001 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3003 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3007 #else /* !QLA_64BIT_PTR */
3010 * qla1280_32bit_start_scsi
3011 * The start SCSI is responsible for building request packets on
3012 * request ring and modifying ISP input pointer.
3014 * The Qlogic firmware interface allows every queue slot to have a SCSI
3015 * command and up to 4 scatter/gather (SG) entries. If we need more
3016 * than 4 SG entries, then continuation entries are used that can
3017 * hold another 7 entries each. The start routine determines if there
3018 are enough empty slots and then builds the combination of requests to
3019 * fulfill the OS request.
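 *
 * Worked example (illustrative): a 12-segment transfer needs the
 * command entry (4 segments) plus two continuation entries (7 + 1),
 * i.e. req_cnt = 1 + (12 - 4) / 7 + 1 = 3 request ring slots.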
3022 * ha = adapter block pointer.
3023 * sp = SCSI Request Block structure pointer.
3026 * 0 = success, was able to issue command.
3029 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3031 struct device_reg __iomem *reg = ha->iobase;
3032 struct scsi_cmnd *cmd = sp->cmd;
3033 struct cmd_entry *pkt;
3041 ENTER("qla1280_32bit_start_scsi");
3043 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3046 /* Calculate number of entries and segments required. */
3048 seg_cnt = scsi_dma_map(cmd);
3051 * if greater than four sg entries then we need to allocate
3052 * continuation entries
3055 req_cnt += (seg_cnt - 4) / 7;
3056 if ((seg_cnt - 4) % 7)
3059 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3060 cmd, seg_cnt, req_cnt);
3061 } else if (seg_cnt < 0) {
3066 if ((req_cnt + 2) >= ha->req_q_cnt) {
3067 /* Calculate number of free request entries. */
3068 cnt = RD_REG_WORD(&reg->mailbox4);
3069 if (ha->req_ring_index < cnt)
3070 ha->req_q_cnt = cnt - ha->req_ring_index;
3073 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3076 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3077 ha->req_q_cnt, seg_cnt);
3078 /* If room for request in request ring. */
3079 if ((req_cnt + 2) >= ha->req_q_cnt) {
3081 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3082 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3083 ha->req_q_cnt, req_cnt);
3087 /* Check for empty slot in outstanding command list. */
3088 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3089 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3091 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3093 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3094 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3098 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3099 ha->outstanding_cmds[cnt] = sp;
3100 ha->req_q_cnt -= req_cnt;
3103 * Build command packet.
3105 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3107 pkt->entry_type = COMMAND_TYPE;
3108 pkt->entry_count = (uint8_t) req_cnt;
3109 pkt->sys_define = (uint8_t) ha->req_ring_index;
3110 pkt->entry_status = 0;
3111 pkt->handle = cpu_to_le32(cnt);
3113 /* Zero out remaining portion of packet. */
3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3116 /* Set ISP command timeout. */
3117 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
3119 /* Set device target ID and LUN */
3120 pkt->lun = SCSI_LUN_32(cmd);
3121 pkt->target = SCSI_BUS_32(cmd) ?
3122 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3124 /* Enable simple tag queuing if device supports it. */
3125 if (cmd->device->simple_tags)
3126 pkt->control_flags |= cpu_to_le16(BIT_3);
3128 /* Load SCSI command packet. */
3129 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3130 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3132 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3133 /* Set transfer direction. */
3134 dir = qla1280_data_direction(cmd);
3135 pkt->control_flags |= cpu_to_le16(dir);
3137 /* Set total data segment count. */
3138 pkt->dseg_count = cpu_to_le16(seg_cnt);
3141 * Load data segments.
3144 struct scatterlist *sg, *s;
3145 int remseg = seg_cnt;
3147 sg = scsi_sglist(cmd);
3149 /* Setup packet address segment pointer. */
3150 dword_ptr = &pkt->dseg_0_address;
3152 dprintk(3, "Building S/G data segments..\n");
3153 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3155 /* Load command entry data segments. */
3156 for_each_sg(sg, s, seg_cnt, cnt) {
3160 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3161 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3162 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3163 (pci_dma_lo32(sg_dma_address(s))),
3168 * Build continuation packets.
3170 dprintk(3, "S/G Building Continuation"
3171 "...seg_cnt=0x%x remains\n", seg_cnt);
3172 while (remseg > 0) {
3173 /* Continue from end point */
3175 /* Adjust ring index. */
3176 ha->req_ring_index++;
3177 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3178 ha->req_ring_index = 0;
3179 ha->request_ring_ptr =
3182 ha->request_ring_ptr++;
3184 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3186 /* Zero out packet. */
3187 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3189 /* Load packet defaults. */
3190 ((struct cont_entry *) pkt)->
3191 entry_type = CONTINUE_TYPE;
3192 ((struct cont_entry *) pkt)->entry_count = 1;
3194 ((struct cont_entry *) pkt)->sys_define =
3195 (uint8_t) ha->req_ring_index;
3197 /* Setup packet address segment pointer. */
3199 &((struct cont_entry *) pkt)->dseg_0_address;
3201 /* Load continuation entry data segments. */
3202 for_each_sg(sg, s, remseg, cnt) {
3206 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3208 cpu_to_le32(sg_dma_len(s));
3210 "S/G Segment Cont. phys_addr=0x%x, "
3212 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3213 cpu_to_le32(sg_dma_len(s)));
3216 dprintk(5, "qla1280_32bit_start_scsi: "
3217 "continuation packet data - "
3218 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3219 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3220 qla1280_dump_buffer(5, (char *)pkt,
3221 REQUEST_ENTRY_SIZE);
3223 } else { /* No data transfer at all */
3224 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3225 "packet data - \n");
3226 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3228 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3229 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3230 REQUEST_ENTRY_SIZE);
3232 /* Adjust ring index. */
3233 ha->req_ring_index++;
3234 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3235 ha->req_ring_index = 0;
3236 ha->request_ring_ptr = ha->request_ring;
3238 ha->request_ring_ptr++;
3240 /* Set chip new ring index. */
3241 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3242 "for pending command\n");
3243 sp->flags |= SRB_SENT;
3245 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3246 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3251 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3253 LEAVE("qla1280_32bit_start_scsi");
3261 * Function is responsible for locking ring and
3262 * getting a zeroed out request packet.
3265 * ha = adapter block pointer.
3268 * 0 = failed to get slot.
3271 qla1280_req_pkt(struct scsi_qla_host *ha)
3273 struct device_reg __iomem *reg = ha->iobase;
3274 request_t *pkt = NULL;
3278 ENTER("qla1280_req_pkt");
3281 * This can be called from interrupt context, damn it!!!
3283 /* Wait for 30 seconds for slot. */
3284 for (timer = 15000000; timer; timer--) {
3285 if (ha->req_q_cnt > 0) {
3286 /* Calculate number of free request entries. */
3287 cnt = RD_REG_WORD(&reg->mailbox4);
3288 if (ha->req_ring_index < cnt)
3289 ha->req_q_cnt = cnt - ha->req_ring_index;
3292 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3295 /* Found empty request ring slot? */
3296 if (ha->req_q_cnt > 0) {
3298 pkt = ha->request_ring_ptr;
3300 /* Zero out packet. */
3301 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3304 * How can this be right when we have a ring
3307 /* Set system defined field. */
3308 pkt->sys_define = (uint8_t) ha->req_ring_index;
3310 /* Set entry count. */
3311 pkt->entry_count = 1;
3318 /* Check for pending interrupts. */
3323 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3325 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3332 * Function is responsible for modifying ISP input pointer.
3333 * Releases ring lock.
3336 * ha = adapter block pointer.
3339 qla1280_isp_cmd(struct scsi_qla_host *ha)
3341 struct device_reg __iomem *reg = ha->iobase;
3343 ENTER("qla1280_isp_cmd");
3345 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3346 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3347 REQUEST_ENTRY_SIZE);
3349 /* Adjust ring index. */
3350 ha->req_ring_index++;
3351 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3352 ha->req_ring_index = 0;
3353 ha->request_ring_ptr = ha->request_ring;
3355 ha->request_ring_ptr++;
3358 * Update request index to mailbox4 (Request Queue In).
3359 * The mmiowb() ensures that this write is ordered with writes by other
3360 * CPUs. Without the mmiowb(), it is possible for the following:
3361 * CPUA posts write of index 5 to mailbox4
3362 * CPUA releases host lock
3363 * CPUB acquires host lock
3364 * CPUB posts write of index 6 to mailbox4
3365 * On PCI bus, order reverses and write of 6 posts, then index 5,
3366 * causing chip to issue full queue of stale commands
3367 * The mmiowb() prevents future writes from crossing the barrier.
3368 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3370 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3373 LEAVE("qla1280_isp_cmd");
3376 /****************************************************************************/
3377 /* Interrupt Service Routine. */
3378 /****************************************************************************/
3380 /****************************************************************************
3382 * Calls I/O done on command completion.
3385 * ha = adapter block pointer.
3386 * done_q = done queue.
3387 ****************************************************************************/
3389 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3391 struct device_reg __iomem *reg = ha->iobase;
3392 struct response *pkt;
3393 struct srb *sp = NULL;
3394 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3399 ENTER("qla1280_isr");
3401 istatus = RD_REG_WORD(&reg->istatus);
3402 if (!(istatus & (RISC_INT | PCI_INT)))
3405 /* Save mailbox register 5 */
3406 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3408 /* Check for mailbox interrupt. */
3410 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3412 if (mailbox[0] & BIT_0) {
3413 /* Get mailbox data. */
3414 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3417 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3418 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3419 *wptr = RD_REG_WORD(&reg->mailbox2);
3420 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3422 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3423 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3425 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3426 *wptr = RD_REG_WORD(&reg->mailbox7);
3429 /* Release mailbox registers. */
3431 WRT_REG_WORD(&reg->semaphore, 0);
3432 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3434 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3437 /* Handle asynchronous event */
3438 switch (mailbox[0]) {
3439 case MBA_SCSI_COMPLETION: /* Response completion */
3440 dprintk(5, "qla1280_isr: mailbox SCSI response "
3443 if (ha->flags.online) {
3444 /* Get outstanding command index. */
3445 index = mailbox[2] << 16 | mailbox[1];
3447 /* Validate handle. */
3448 if (index < MAX_OUTSTANDING_COMMANDS)
3449 sp = ha->outstanding_cmds[index];
3454 /* Free outstanding command slot. */
3455 ha->outstanding_cmds[index] = NULL;
3457 /* Save ISP completion status */
3458 CMD_RESULT(sp->cmd) = 0;
3460 /* Place block on done queue */
3461 list_add_tail(&sp->list, done_q);
3464 * If we get here we have a real problem!
3467 "qla1280: ISP invalid handle");
3472 case MBA_BUS_RESET: /* SCSI Bus Reset */
3473 ha->flags.reset_marker = 1;
3474 index = mailbox[6] & BIT_0;
3475 ha->bus_settings[index].reset_marker = 1;
3477 printk(KERN_DEBUG "qla1280_isr(): index %i "
3478 "asynchronous BUS_RESET\n", index);
3481 case MBA_SYSTEM_ERR: /* System Error */
3483 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3484 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3488 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3490 "qla1280: ISP Request Transfer Error\n");
3493 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3495 "qla1280: ISP Response Transfer Error\n");
3498 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3499 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3502 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3504 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3507 case MBA_DEVICE_RESET: /* Bus Device Reset */
3508 printk(KERN_INFO "qla1280_isr(): asynchronous "
3509 "BUS_DEVICE_RESET\n");
3511 ha->flags.reset_marker = 1;
3512 index = mailbox[6] & BIT_0;
3513 ha->bus_settings[index].reset_marker = 1;
3516 case MBA_BUS_MODE_CHANGE:
3518 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3522 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3523 if (mailbox[0] < MBA_ASYNC_EVENT) {
3525 memcpy((uint16_t *) ha->mailbox_out, wptr,
3526 MAILBOX_REGISTER_COUNT *
3529 if(ha->mailbox_wait != NULL)
3530 complete(ha->mailbox_wait);
3535 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3539 * We will receive interrupts during mailbox testing prior to
3540 * the card being marked online, hence the double check.
3542 if (!(ha->flags.online && !ha->mailbox_wait)) {
3543 dprintk(2, "qla1280_isr: Response pointer Error\n");
3547 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3550 while (ha->rsp_ring_index != mailbox[5]) {
3551 pkt = ha->response_ring_ptr;
3553 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3554 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3555 dprintk(5,"qla1280_isr: response packet data\n");
3556 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3558 if (pkt->entry_type == STATUS_TYPE) {
3559 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3560 || pkt->comp_status || pkt->entry_status) {
3561 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3562 "0x%x mailbox[5] = 0x%x, comp_status "
3563 "= 0x%x, scsi_status = 0x%x\n",
3564 ha->rsp_ring_index, mailbox[5],
3565 le16_to_cpu(pkt->comp_status),
3566 le16_to_cpu(pkt->scsi_status));
3569 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3570 "0x%x, mailbox[5] = 0x%x\n",
3571 ha->rsp_ring_index, mailbox[5]);
3572 dprintk(2, "qla1280_isr: response packet data\n");
3573 qla1280_dump_buffer(2, (char *)pkt,
3574 RESPONSE_ENTRY_SIZE);
3577 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3578 dprintk(2, "status: Cmd %p, handle %i\n",
3579 ha->outstanding_cmds[pkt->handle]->cmd,
3581 if (pkt->entry_type == STATUS_TYPE)
3582 qla1280_status_entry(ha, pkt, done_q);
3584 qla1280_error_entry(ha, pkt, done_q);
3585 /* Adjust ring index. */
3586 ha->rsp_ring_index++;
3587 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3588 ha->rsp_ring_index = 0;
3589 ha->response_ring_ptr = ha->response_ring;
3591 ha->response_ring_ptr++;
3592 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3597 LEAVE("qla1280_isr");
3602 * Processes asynchronous reset.
3605 * ha = adapter block pointer.
3608 qla1280_rst_aen(struct scsi_qla_host *ha)
3612 ENTER("qla1280_rst_aen");
3614 if (ha->flags.online && !ha->flags.reset_active &&
3615 !ha->flags.abort_isp_active) {
3616 ha->flags.reset_active = 1;
3617 while (ha->flags.reset_marker) {
3618 /* Issue marker command. */
3619 ha->flags.reset_marker = 0;
3620 for (bus = 0; bus < ha->ports &&
3621 !ha->flags.reset_marker; bus++) {
3622 if (ha->bus_settings[bus].reset_marker) {
3623 ha->bus_settings[bus].reset_marker = 0;
3624 qla1280_marker(ha, bus, 0, 0,
3631 LEAVE("qla1280_rst_aen");
3636 * qla1280_status_entry
3637 * Processes received ISP status entry.
3640 * ha = adapter block pointer.
3641 * pkt = entry pointer.
3642 * done_q = done queue.
3645 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3646 struct list_head *done_q)
3648 unsigned int bus, target, lun;
3651 struct scsi_cmnd *cmd;
3652 uint32_t handle = le32_to_cpu(pkt->handle);
3653 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3654 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3656 ENTER("qla1280_status_entry");
3658 /* Validate handle. */
3659 if (handle < MAX_OUTSTANDING_COMMANDS)
3660 sp = ha->outstanding_cmds[handle];
3665 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3669 /* Free outstanding command slot. */
3670 ha->outstanding_cmds[handle] = NULL;
3674 /* Generate LU queue on cntrl, target, LUN */
3675 bus = SCSI_BUS_32(cmd);
3676 target = SCSI_TCN_32(cmd);
3677 lun = SCSI_LUN_32(cmd);
3679 if (comp_status || scsi_status) {
3680 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3681 "0x%x, handle = 0x%x\n", comp_status,
3682 scsi_status, handle);
3685 /* Target busy or queue full */
3686 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3687 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3688 CMD_RESULT(cmd) = scsi_status & 0xff;
3691 /* Save ISP completion status */
3692 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3694 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3695 if (comp_status != CS_ARS_FAILED) {
3696 uint16_t req_sense_length =
3697 le16_to_cpu(pkt->req_sense_length);
3698 if (req_sense_length < CMD_SNSLEN(cmd))
3699 sense_sz = req_sense_length;
3702 * scsi_cmnd->sense_buffer is
3703 * 64 bytes, why only copy 63?
3704 * This looks wrong! /Jes
3706 sense_sz = CMD_SNSLEN(cmd) - 1;
3708 memcpy(cmd->sense_buffer,
3709 &pkt->req_sense_data, sense_sz);
3712 memset(cmd->sense_buffer + sense_sz, 0,
3713 SCSI_SENSE_BUFFERSIZE - sense_sz);
3715 dprintk(2, "qla1280_status_entry: Check "
3716 "condition Sense data, b %i, t %i, "
3717 "l %i\n", bus, target, lun);
3719 qla1280_dump_buffer(2,
3720 (char *)cmd->sense_buffer,
3725 /* Place command on done queue. */
3726 list_add_tail(&sp->list, done_q);
3728 LEAVE("qla1280_status_entry");
3732 * qla1280_error_entry
3733 * Processes error entry.
3736 * ha = adapter block pointer.
3737 * pkt = entry pointer.
3738 * done_q = done queue.
3741 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3742 struct list_head *done_q)
3745 uint32_t handle = le32_to_cpu(pkt->handle);
3747 ENTER("qla1280_error_entry");
3749 if (pkt->entry_status & BIT_3)
3750 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3751 else if (pkt->entry_status & BIT_2)
3752 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3753 else if (pkt->entry_status & BIT_1)
3754 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3756 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3758 /* Validate handle. */
3759 if (handle < MAX_OUTSTANDING_COMMANDS)
3760 sp = ha->outstanding_cmds[handle];
3765 /* Free outstanding command slot. */
3766 ha->outstanding_cmds[handle] = NULL;
3768 /* Bad payload or header */
3769 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3770 /* Bad payload or header, set error status. */
3771 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3772 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3773 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3774 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3776 /* Set error status. */
3777 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3780 /* Place command on done queue. */
3781 list_add_tail(&sp->list, done_q);
3783 #ifdef QLA_64BIT_PTR
3784 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3785 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3789 LEAVE("qla1280_error_entry");
3794 * Resets ISP and aborts all outstanding commands.
3797 * ha = adapter block pointer.
3803 qla1280_abort_isp(struct scsi_qla_host *ha)
3805 struct device_reg __iomem *reg = ha->iobase;
3811 ENTER("qla1280_abort_isp");
3813 if (ha->flags.abort_isp_active || !ha->flags.online)
3816 ha->flags.abort_isp_active = 1;
3818 /* Disable ISP interrupts. */
3819 qla1280_disable_intrs(ha);
3820 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3821 RD_REG_WORD(&reg->id_l);
3823 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3825 /* Dequeue all commands in outstanding command list. */
3826 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3827 struct scsi_cmnd *cmd;
3828 sp = ha->outstanding_cmds[cnt];
3832 CMD_RESULT(cmd) = DID_RESET << 16;
3835 ha->outstanding_cmds[cnt] = NULL;
3837 (*cmd->scsi_done)(cmd);
3843 status = qla1280_load_firmware(ha);
3847 /* Setup adapter based on NVRAM parameters. */
3848 qla1280_nvram_config (ha);
3850 status = qla1280_init_rings(ha);
3854 /* Issue SCSI reset. */
3855 for (bus = 0; bus < ha->ports; bus++)
3856 qla1280_bus_reset(ha, bus);
3858 ha->flags.abort_isp_active = 0;
3862 "qla1280: ISP error recovery failed, board disabled");
3863 qla1280_reset_adapter(ha);
3864 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3867 LEAVE("qla1280_abort_isp");
3873 * qla1280_debounce_register
3874 * Debounce register.
3877 * port = register address.
3883 qla1280_debounce_register(volatile u16 __iomem * addr)
3888 ret = RD_REG_WORD(addr);
3889 ret2 = RD_REG_WORD(addr);
3896 ret = RD_REG_WORD(addr);
3897 ret2 = RD_REG_WORD(addr);
3898 } while (ret != ret2);
3904 /************************************************************************
3905 * qla1280_check_for_dead_scsi_bus *
3907 * This routine checks for a dead SCSI bus *
3908 ************************************************************************/
3909 #define SET_SXP_BANK 0x0100
3910 #define SCSI_PHASE_INVALID 0x87FF
3912 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3914 uint16_t config_reg, scsi_control;
3915 struct device_reg __iomem *reg = ha->iobase;
3917 if (ha->bus_settings[bus].scsi_bus_dead) {
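/* Pause the RISC, temporarily switch the configuration register to
 * the SXP bank so the SCSI control pins can be sampled, then restore
 * the bank and release the RISC. */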
3918 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3919 config_reg = RD_REG_WORD(&reg->cfg_1);
3920 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3921 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3922 WRT_REG_WORD(&reg->cfg_1, config_reg);
3923 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3925 if (scsi_control == SCSI_PHASE_INVALID) {
3926 ha->bus_settings[bus].scsi_bus_dead = 1;
3928 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3929 CMD_HANDLE(cp) = INVALID_HANDLE;
3930 /* ha->actthreads--; */
3932 (*(cp)->scsi_done)(cp);
3934 return 1; /* bus is dead */
3936 ha->bus_settings[bus].scsi_bus_dead = 0;
3937 ha->bus_settings[bus].failed_reset_count = 0;
3940 return 0; /* bus is not dead */
3944 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3945 struct scsi_device *device)
3947 uint16_t mb[MAILBOX_REGISTER_COUNT];
3948 int bus, target, lun;
3950 bus = device->channel;
3951 target = device->id;
3955 mb[0] = MBC_GET_TARGET_PARAMETERS;
3956 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3958 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3961 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3964 printk(" Sync: period %d, offset %d",
3965 (mb[3] & 0xff), (mb[3] >> 8));
3968 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3973 if (device->simple_tags)
3974 printk(", Tagged queuing: depth %d", device->queue_depth);
3981 __qla1280_dump_buffer(char *b, int size)
3986 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
3987 "Bh Ch Dh Eh Fh\n");
3988 printk(KERN_DEBUG "---------------------------------------------"
3989 "------------------\n");
3991 for (cnt = 0; cnt < size;) {
3994 printk("0x%02x", c);
4005 /**************************************************************************
4006 * ql1280_print_scsi_cmd
4008 **************************************************************************/
4010 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4012 struct scsi_qla_host *ha;
4013 struct Scsi_Host *host = CMD_HOST(cmd);
4015 /* struct scatterlist *sg; */
4018 ha = (struct scsi_qla_host *)host->hostdata;
4020 sp = (struct srb *)CMD_SP(cmd);
4021 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4022 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4023 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4026 for (i = 0; i < cmd->cmd_len; i++) {
4027 printk("0x%02x ", cmd->cmnd[i]);
4029 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4030 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4031 scsi_sglist(cmd), scsi_bufflen(cmd));
4034 sg = (struct scatterlist *) cmd->request_buffer;
4035 printk(" SG buffer: \n");
4036 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4038 printk(" tag=%d, transfersize=0x%x \n",
4039 cmd->tag, cmd->transfersize);
4040 printk(" Pid=%li, SP=0x%p\n", cmd->serial_number, CMD_SP(cmd));
4041 printk(" underflow size = 0x%x, direction=0x%x\n",
4042 cmd->underflow, cmd->sc_data_direction);
4045 /**************************************************************************
4046 * ql1280_dump_device
4048 **************************************************************************/
4050 ql1280_dump_device(struct scsi_qla_host *ha)
4053 struct scsi_cmnd *cp;
4057 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4059 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4060 if ((sp = ha->outstanding_cmds[i]) == NULL)
4062 if ((cp = sp->cmd) == NULL)
4064 qla1280_print_scsi_cmd(1, cp);
4079 struct setup_tokens {
4084 static struct setup_tokens setup_token[] __initdata =
4086 { "nvram", TOKEN_NVRAM },
4087 { "sync", TOKEN_SYNC },
4088 { "wide", TOKEN_WIDE },
4089 { "ppr", TOKEN_PPR },
4090 { "verbose", TOKEN_VERBOSE },
4091 { "debug", TOKEN_DEBUG },
4095 /**************************************************************************
4098 * Handle boot parameters. This really needs to be changed so one
4099 * can specify per adapter parameters.
4100 **************************************************************************/
4102 qla1280_setup(char *s)
4110 while (cp && (ptr = strchr(cp, ':'))) {
4112 if (!strcmp(ptr, "yes")) {
4115 } else if (!strcmp(ptr, "no")) {
4119 val = simple_strtoul(ptr, &ptr, 0);
4121 switch ((toke = qla1280_get_token(cp))) {
4124 driver_setup.no_nvram = 1;
4128 driver_setup.no_sync = 1;
4129 else if (val != 0x10000)
4130 driver_setup.sync_mask = val;
4134 driver_setup.no_wide = 1;
4135 else if (val != 0x10000)
4136 driver_setup.wide_mask = val;
4140 driver_setup.no_ppr = 1;
4141 else if (val != 0x10000)
4142 driver_setup.ppr_mask = val;
4145 qla1280_verbose = val;
4148 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4152 cp = strchr(ptr, ';');
4164 qla1280_get_token(char *str)
4170 sep = strchr(str, ':');
4173 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4174 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4175 ret = setup_token[i].val;
4185 static struct scsi_host_template qla1280_driver_template = {
4186 .module = THIS_MODULE,
4187 .proc_name = "qla1280",
4188 .name = "Qlogic ISP 1280/12160",
4189 .info = qla1280_info,
4190 .slave_configure = qla1280_slave_configure,
4191 .queuecommand = qla1280_queuecommand,
4192 .eh_abort_handler = qla1280_eh_abort,
4193 .eh_device_reset_handler= qla1280_eh_device_reset,
4194 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4195 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4196 .bios_param = qla1280_biosparam,
4197 .can_queue = 0xfffff,
4199 .sg_tablesize = SG_ALL,
4201 .use_clustering = ENABLE_CLUSTERING,
4205 static int __devinit
4206 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4208 int devnum = id->driver_data;
4209 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4210 struct Scsi_Host *host;
4211 struct scsi_qla_host *ha;
4212 int error = -ENODEV;
4214 /* Bypass all AMI SUBSYS VENDOR IDs */
4215 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4217 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4221 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4222 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4224 if (pci_enable_device(pdev)) {
4226 "qla1280: Failed to enabled pci device, aborting.\n");
4230 pci_set_master(pdev);
4233 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4236 "qla1280: Failed to register host, aborting.\n");
4237 goto error_disable_device;
4240 ha = (struct scsi_qla_host *)host->hostdata;
4241 memset(ha, 0, sizeof(struct scsi_qla_host));
4244 ha->devnum = devnum; /* specifies microcode load address */
4246 #ifdef QLA_64BIT_PTR
4247 if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
4248 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
4249 printk(KERN_WARNING "scsi(%li): Unable to set a "
4250 "suitable DMA mask - aborting\n", ha->host_no);
4252 goto error_put_host;
4255 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4258 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
4259 printk(KERN_WARNING "scsi(%li): Unable to set a "
4260 "suitable DMA mask - aborting\n", ha->host_no);
4262 goto error_put_host;
4266 ha->request_ring = pci_alloc_consistent(ha->pdev,
4267 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4269 if (!ha->request_ring) {
4270 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4271 goto error_put_host;
4274 ha->response_ring = pci_alloc_consistent(ha->pdev,
4275 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4277 if (!ha->response_ring) {
4278 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4279 goto error_free_request_ring;
4282 ha->ports = bdp->numPorts;
4285 ha->host_no = host->host_no;
4287 host->irq = pdev->irq;
4288 host->max_channel = bdp->numPorts - 1;
4289 host->max_lun = MAX_LUNS - 1;
4290 host->max_id = MAX_TARGETS;
4291 host->max_sectors = 1024;
4292 host->unique_id = host->host_no;
#if MEMORY_MAPPED_IO
	ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1),
			      pci_resource_len(ha->pdev, 1));
	if (!ha->mmpbase) {
		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
		goto error_free_response_ring;
	}

	host->base = (unsigned long)ha->mmpbase;
	ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
	host->io_port = pci_resource_start(ha->pdev, 0);
	if (!request_region(host->io_port, 0xff, "qla1280")) {
		printk(KERN_INFO "qla1280: Failed to reserve i/o region "
				 "0x%04lx-0x%04lx - already in use\n",
				 host->io_port, host->io_port + 0xff);
		goto error_free_response_ring;
	}

	ha->iobase = (struct device_reg *)host->io_port;
#endif
	INIT_LIST_HEAD(&ha->done_q);

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);
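
	/*
	 * The PCI interrupt line may be shared, so the handler is installed
	 * with IRQF_SHARED; the ISP's own interrupts were disabled above so
	 * that nothing is delivered before the adapter is initialized.
	 */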
	if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
			"qla1280", ha)) {
		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d, "
		       "already in use\n", pdev->irq);
		goto error_release_region;
	}
	/* load the F/W, read parameters, and init the H/W */
	if (qla1280_initialize_adapter(ha)) {
		printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
		goto error_free_irq;
	}
	/* set our host ID  (need to do something about our two IDs) */
	host->this_id = ha->bus_settings[0].id;

	pci_set_drvdata(pdev, host);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto error_disable_adapter;
	scsi_scan_host(host);

	return 0;
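
	/*
	 * The error exits below unwind the setup in reverse order; control
	 * falls through from each label to all of the later ones.
	 */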
 error_disable_adapter:
	qla1280_disable_intrs(ha);
 error_free_irq:
	free_irq(pdev->irq, ha);
 error_release_region:
#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif
 error_free_response_ring:
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			ha->response_ring, ha->response_dma);
 error_free_request_ring:
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			ha->request_ring, ha->request_dma);
 error_put_host:
	scsi_host_put(host);
 error_disable_device:
	pci_disable_device(pdev);
 error:
	return error;
}
static void __devexit
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
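
	/*
	 * Teardown mirrors qla1280_probe_one() in reverse: detach the host
	 * from the midlayer first so no new commands can arrive, then stop
	 * interrupts and release the remaining resources.
	 */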
	scsi_remove_host(host);

	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	scsi_host_put(host);
}
static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= __devexit_p(qla1280_remove_one),
};

static int __init
qla1280_init(void)
{
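	/*
	 * The per-command srb is expected to fit inside the scsi_cmnd's SCp
	 * (struct scsi_pointer) area, so refuse to load if struct srb has
	 * outgrown that space.
	 */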
	if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return -EINVAL;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may not be null
	 * and it would point to our bootup string, just like on the lilo
	 * command line.  If not NULL, then process this config string with
	 * qla1280_setup
	 *
	 * Boot time Options
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
	 */
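	/*
	 * A minimal sketch of the module-load equivalent, assuming the boot
	 * string is exposed as a module parameter also named "qla1280" (the
	 * variable handed to qla1280_setup() below).  The parser in this
	 * file expects "token:value" pairs separated by ';', with tokens
	 * taken from setup_token[]; e.g. in /etc/modprobe.conf:
	 *
	 *	options qla1280 qla1280=verbose:1;wide:no
	 */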
	if (qla1280)
		qla1280_setup(qla1280);
#endif
	return pci_register_driver(&qla1280_pci_driver);
}

static void __exit
qla1280_exit(void)
{
	pci_unregister_driver(&qla1280_pci_driver);
}
module_init(qla1280_init);
module_exit(qla1280_exit);

MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA1280_VERSION);
/*
 * Overrides for Emacs so that we almost follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------