/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
 *
 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
 *
 * A lot of this driver was directly stolen from Erik H. Moe's PCI
 * Qlogic ISP driver.  Mucho kudos to him for this code.
 *
 * An even bigger kudos to John Grana at Performance Technologies
 * for providing me with the hardware to write this driver, you rule.
 *
 * May, 2, 1997: Added support for QLGC,isp --jj
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>

#include <asm/byteorder.h>

#include "qlogicpti.h"

#include <asm/dma.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#define MAX_TARGETS	16
#define MAX_LUNS	8	/* 32 for 1.31 F/W */

#define DEFAULT_LOOP_COUNT	10000

#include "qlogicpti_asm.c"
static struct qlogicpti *qptichain = NULL;
static DEFINE_SPINLOCK(qptichain_lock);

#define PACKB(a, b)	(((a)<<4)|(b))
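
/* PACKB() stores, for each mailbox command, the number of MBOX words the
 * host must write in the high nibble and the number the firmware hands
 * back in the low nibble; qlogicpti_mbox_command() extracts them below
 * with ">> 4" and "& 0xf".
 */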
static const u_char mbox_param[] = {
	PACKB(1, 1),	/* MBOX_NO_OP */
	PACKB(5, 5),	/* MBOX_LOAD_RAM */
	PACKB(2, 0),	/* MBOX_EXEC_FIRMWARE */
	PACKB(5, 5),	/* MBOX_DUMP_RAM */
	PACKB(3, 3),	/* MBOX_WRITE_RAM_WORD */
	PACKB(2, 3),	/* MBOX_READ_RAM_WORD */
	PACKB(6, 6),	/* MBOX_MAILBOX_REG_TEST */
	PACKB(2, 3),	/* MBOX_VERIFY_CHECKSUM */
	PACKB(1, 3),	/* MBOX_ABOUT_FIRMWARE */
	PACKB(0, 0),	/* 0x0009 */
	PACKB(0, 0),	/* 0x000a */
	PACKB(0, 0),	/* 0x000b */
	PACKB(0, 0),	/* 0x000c */
	PACKB(0, 0),	/* 0x000d */
	PACKB(1, 2),	/* MBOX_CHECK_FIRMWARE */
	PACKB(0, 0),	/* 0x000f */
	PACKB(5, 5),	/* MBOX_INIT_REQ_QUEUE */
	PACKB(6, 6),	/* MBOX_INIT_RES_QUEUE */
	PACKB(4, 4),	/* MBOX_EXECUTE_IOCB */
	PACKB(2, 2),	/* MBOX_WAKE_UP */
	PACKB(1, 6),	/* MBOX_STOP_FIRMWARE */
	PACKB(4, 4),	/* MBOX_ABORT */
	PACKB(2, 2),	/* MBOX_ABORT_DEVICE */
	PACKB(3, 3),	/* MBOX_ABORT_TARGET */
	PACKB(2, 2),	/* MBOX_BUS_RESET */
	PACKB(2, 3),	/* MBOX_STOP_QUEUE */
	PACKB(2, 3),	/* MBOX_START_QUEUE */
	PACKB(2, 3),	/* MBOX_SINGLE_STEP_QUEUE */
	PACKB(2, 3),	/* MBOX_ABORT_QUEUE */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_STATUS */
	PACKB(0, 0),	/* 0x001e */
	PACKB(1, 3),	/* MBOX_GET_FIRMWARE_STATUS */
	PACKB(1, 2),	/* MBOX_GET_INIT_SCSI_ID */
	PACKB(1, 2),	/* MBOX_GET_SELECT_TIMEOUT */
	PACKB(1, 3),	/* MBOX_GET_RETRY_COUNT */
	PACKB(1, 2),	/* MBOX_GET_TAG_AGE_LIMIT */
	PACKB(1, 2),	/* MBOX_GET_CLOCK_RATE */
	PACKB(1, 2),	/* MBOX_GET_ACT_NEG_STATE */
	PACKB(1, 2),	/* MBOX_GET_ASYNC_DATA_SETUP_TIME */
	PACKB(1, 3),	/* MBOX_GET_SBUS_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_TARGET_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x002a */
	PACKB(0, 0),	/* 0x002b */
	PACKB(0, 0),	/* 0x002c */
	PACKB(0, 0),	/* 0x002d */
	PACKB(0, 0),	/* 0x002e */
	PACKB(0, 0),	/* 0x002f */
	PACKB(2, 2),	/* MBOX_SET_INIT_SCSI_ID */
	PACKB(2, 2),	/* MBOX_SET_SELECT_TIMEOUT */
	PACKB(3, 3),	/* MBOX_SET_RETRY_COUNT */
	PACKB(2, 2),	/* MBOX_SET_TAG_AGE_LIMIT */
	PACKB(2, 2),	/* MBOX_SET_CLOCK_RATE */
	PACKB(2, 2),	/* MBOX_SET_ACTIVE_NEG_STATE */
	PACKB(2, 2),	/* MBOX_SET_ASYNC_DATA_SETUP_TIME */
	PACKB(3, 3),	/* MBOX_SET_SBUS_CONTROL_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_TARGET_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x003a */
	PACKB(0, 0),	/* 0x003b */
	PACKB(0, 0),	/* 0x003c */
	PACKB(0, 0),	/* 0x003d */
	PACKB(0, 0),	/* 0x003e */
	PACKB(0, 0),	/* 0x003f */
	PACKB(0, 0),	/* 0x0040 */
	PACKB(0, 0),	/* 0x0041 */
	PACKB(0, 0)	/* 0x0042 */
};

#define MAX_MBOX_COMMAND	ARRAY_SIZE(mbox_param)
/* queue lengths _must_ be a power of two: */
#define QUEUE_DEPTH(in, out, ql)	((in - out) & (ql))
#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, \
						    QLOGICPTI_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
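
/* The wraparound math above relies on the queue length being a power-of-two
 * mask: (in - out) & mask gives the number of occupied slots even after
 * "in" wraps past zero.  For illustration only: with a mask of 255,
 * in = 2 and out = 250 yields a depth of 8; the real masks come from
 * QLOGICPTI_REQ_QUEUE_LEN and RES_QUEUE_LEN in qlogicpti.h.
 */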
static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
		    qpti->qregs + SBUS_CTRL);
}

static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(0, qpti->qregs + SBUS_CTRL);
}
static inline void set_sbus_cfg1(struct qlogicpti *qpti)
{
	u16 val;
	u8 bursts = qpti->bursts;

#if 0	/* It appears that at least PTI cards do not support
	 * 64-byte bursts and that setting the B64 bit actually
	 * is a nop and the chip ends up using the smallest burst
	 * size.
	 */
	if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
	} else
#endif
	if (bursts & DMA_BURST32) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
	} else if (bursts & DMA_BURST16) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
	} else if (bursts & DMA_BURST8) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
	} else {
		val = 0; /* No sbus bursts for you... */
	}
	sbus_writew(val, qpti->qregs + SBUS_CFG1);
}
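
/* Mailbox handshake used below: lock the SBUS semaphore, wait for any
 * previous host interrupt to drain, write the outgoing words into
 * MBOX0..MBOX5, interrupt the RISC, spin until it answers, then read the
 * reply words back from the same registers.  Timeouts are reported but
 * not treated as fatal.
 */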
static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
{
	int loop_count;
	u16 tmp;

	if (mbox_param[param[0]] == 0)
		return 1;

	/* Set SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp |= SBUS_SEMAPHORE_LCK;
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* Wait for host IRQ bit to clear. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
		barrier();
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
		       qpti->qpti_id);

	/* Write mailbox command registers.  Deliberate fall-through:
	 * only the registers the command actually uses get written.
	 */
	switch (mbox_param[param[0]] >> 4) {
	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Clear SBUS semaphore. */
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	/* Set HOST interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_SHIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Wait for HOST interrupt clears. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
		       qpti->qpti_id, param[0]);

	/* Wait for SBUS semaphore to get set. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
		udelay(20);

		/* Workaround for some buggy chips. */
		if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
			break;
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
		       qpti->qpti_id, param[0]);

	/* Wait for MBOX busy condition to go away. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
		       qpti->qpti_id, param[0]);

	/* Read back output parameters.  Deliberate fall-through again. */
	switch (mbox_param[param[0]] & 0xf) {
	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Release SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp &= ~(SBUS_SEMAPHORE_LCK);
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	return 0;
}
static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
{
	int i;

	qpti->host_param.initiator_scsi_id = qpti->scsi_id;
	qpti->host_param.bus_reset_delay = 3;
	qpti->host_param.retry_count = 0;
	qpti->host_param.retry_delay = 5;
	qpti->host_param.async_data_setup_time = 3;
	qpti->host_param.req_ack_active_negation = 1;
	qpti->host_param.data_line_active_negation = 1;
	qpti->host_param.data_dma_burst_enable = 1;
	qpti->host_param.command_dma_burst_enable = 1;
	qpti->host_param.tag_aging = 8;
	qpti->host_param.selection_timeout = 250;
	qpti->host_param.max_queue_depth = 256;

	for (i = 0; i < MAX_TARGETS; i++) {
		/*
		 * disconnect, parity, arq, reneg on reset, and, oddly enough
		 * tags...the midlayer's notion of tagged support has to match
		 * our device settings, and since we base whether we enable a
		 * tag on a per-cmnd basis upon what the midlayer sez, we
		 * actually enable the capability here.
		 */
		qpti->dev_param[i].device_flags = 0xcd;
		qpti->dev_param[i].execution_throttle = 16;
		if (qpti->ultra) {
			qpti->dev_param[i].synchronous_period = 12;
			qpti->dev_param[i].synchronous_offset = 8;
		} else {
			qpti->dev_param[i].synchronous_period = 25;
			qpti->dev_param[i].synchronous_offset = 12;
		}
		qpti->dev_param[i].device_enable = 1;
	}
}
static int qlogicpti_reset_hardware(struct Scsi_Host *host)
{
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	u_short param[6];
	unsigned short risc_code_addr;
	int loop_count, i;
	unsigned long flags;

	risc_code_addr = 0x1000;	/* all load addresses are at 0x1000 */

	spin_lock_irqsave(host->host_lock, flags);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);

	/* Only reset the scsi bus if it is not free. */
	if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
		sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
		sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);

	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
		       qpti->qpti_id);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	set_sbus_cfg1(qpti);
	qlogicpti_enable_irqs(qpti);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	/* reset adapter and per-device default values. */
	/* do it after finding out whether we're ultra mode capable */
	qlogicpti_set_hostdev_defaults(qpti);

	/* Release the RISC processor. */
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Get RISC to start executing the firmware code. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Set initiator scsi ID. */
	param[0] = MBOX_SET_INIT_SCSI_ID;
	param[1] = qpti->host_param.initiator_scsi_id;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Initialize state of the queues, both hw and sw. */
	qpti->req_in_ptr = qpti->res_out_ptr = 0;

	param[0] = MBOX_INIT_RES_QUEUE;
	param[1] = RES_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->res_dvma >> 16);
	param[3] = (u_short) (qpti->res_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_INIT_REQ_QUEUE;
	param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->req_dvma >> 16);
	param[3] = (u_short) (qpti->req_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_SET_RETRY_COUNT;
	param[1] = qpti->host_param.retry_count;
	param[2] = qpti->host_param.retry_delay;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_TAG_AGE_LIMIT;
	param[1] = qpti->host_param.tag_aging;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
		param[1] = (i << 8);
		qlogicpti_mbox_command(qpti, param, 0);
	}

	param[0] = MBOX_GET_FIRMWARE_STATUS;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_SELECT_TIMEOUT;
	param[1] = qpti->host_param.selection_timeout;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_SET_TARGET_PARAMS;
		param[1] = (i << 8);
		param[2] = (qpti->dev_param[i].device_flags << 8);
		/*
		 * Since we're now loading 1.31 f/w, force narrow/async.
		 */
		param[3] = 0;	/* no offset, we do not have sync mode yet */
		qlogicpti_mbox_command(qpti, param, 0);
	}

	/*
	 * Always (sigh) do an initial bus reset (kicks f/w).
	 */
	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	qlogicpti_mbox_command(qpti, param, 0);
	qpti->send_marker = 1;

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
#define PTI_RESET_LIMIT 400
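
/* Firmware load sequence implemented below: checksum the embedded image on
 * the host, reset the ISP, stop whatever is running from ROM, write the
 * image one word at a time with MBOX_WRITE_RAM_WORD, have the chip verify
 * it with MBOX_VERIFY_CHECKSUM, and finally start it via MBOX_EXEC_FIRMWARE.
 */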
static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
{
	struct Scsi_Host *host = qpti->qhost;
	unsigned short csum = 0;
	unsigned short param[6];
	unsigned short *risc_code, risc_code_addr, risc_code_length;
	unsigned long flags;
	int i, timeout;

	risc_code = &sbus_risc_code01[0];
	risc_code_addr = 0x1000;	/* all f/w modules load at 0x1000 */
	risc_code_length = sbus_risc_code_length01;

	spin_lock_irqsave(host->host_lock, flags);

	/* Verify the checksum twice, one before loading it, and once
	 * afterwards via the mailbox commands.
	 */
	for (i = 0; i < risc_code_length; i++)
		csum += risc_code[i];
	if (csum) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
		       qpti->qpti_id);
		return 1;
	}
	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
	timeout = PTI_RESET_LIMIT;
	while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
		udelay(20);
	if (!timeout) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
		return 1;
	}

	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);

	sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Pin lines are only stable while RISC is paused. */
	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
		qpti->differential = 1;
	else
		qpti->differential = 0;
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* This shouldn't be necessary- we've reset things so we should be
	   running from the ROM now.. */

	param[0] = MBOX_STOP_FIRMWARE;
	param[1] = param[2] = param[3] = param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Load the new firmware, one word at a time. */
	for (i = 0; i < risc_code_length; i++) {
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = risc_code_addr + i;
		param[2] = risc_code[i];
		if (qlogicpti_mbox_command(qpti, param, 1) ||
		    param[0] != MBOX_COMMAND_COMPLETE) {
			printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
			       qpti->qpti_id);
			spin_unlock_irqrestore(host->host_lock, flags);
			return 1;
		}
	}

	/* Reset the ISP again. */
	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);

	qlogicpti_enable_irqs(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Ask ISP to verify the checksum of the new code. */
	param[0] = MBOX_VERIFY_CHECKSUM;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Start using newly downloaded firmware. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	qlogicpti_mbox_command(qpti, param, 1);

	param[0] = MBOX_ABOUT_FIRMWARE;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Snag the major and minor revisions from the result. */
	qpti->fware_majrev = param[1];
	qpti->fware_minrev = param[2];
	qpti->fware_micrev = param[3];

	/* Set the clock rate */
	param[0] = MBOX_SET_CLOCK_RATE;
	param[1] = qpti->clock;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	if (qpti->is_pti != 0) {
		/* Load scsi initiator ID and interrupt level into sbus static ram. */
		param[0] = MBOX_WRITE_RAM_WORD;
		param[2] = (unsigned short) qpti->scsi_id;
		qlogicpti_mbox_command(qpti, param, 1);

		param[0] = MBOX_WRITE_RAM_WORD;
		param[2] = (unsigned short) 3;
		qlogicpti_mbox_command(qpti, param, 1);
	}

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}
static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
{
	int curstat = sbus_readb(qpti->sreg);

	if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
		printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
	if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
		printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
	if (curstat != qpti->swsreg) {
		int error = 0;

		if (curstat & SREG_FUSE) {
			error++;
			printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
		}
		if (curstat & SREG_TPOWER) {
			error++;
			printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
		}
		if (qpti->differential &&
		    (curstat & SREG_DSENSE) != SREG_DSENSE) {
			error++;
			printk("qlogicpti%d: You have a single ended device on a "
			       "differential bus!  Please fix!\n", qpti->qpti_id);
		}
		qpti->swsreg = curstat;
		return error;
	}
	return 0;
}
static irqreturn_t qpti_intr(int irq, void *dev_id);
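
/* Every probed board is linked onto the qptichain list under
 * qptichain_lock; the two helpers below perform a plain singly linked
 * list append and unlink.
 */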
static void __devinit qpti_chain_add(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain != NULL) {
		struct qlogicpti *qlink = qptichain;
		while (qlink->next)
			qlink = qlink->next;
		qlink->next = qpti;
	} else
		qptichain = qpti;
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}

static void __devexit qpti_chain_del(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain == qpti) {
		qptichain = qpti->next;
	} else {
		struct qlogicpti *qlink = qptichain;
		while (qlink->next != qpti)
			qlink = qlink->next;
		qlink->next = qpti->next;
	}
	spin_unlock_irq(&qptichain_lock);
}
static int __devinit qpti_map_regs(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;

	qpti->qregs = of_ioremap(&op->resource[0], 0,
				 resource_size(&op->resource[0]),
				 "PTI Qlogic/ISP");
	if (!qpti->qregs) {
		printk("PTI: Qlogic/ISP registers are unmappable\n");
		return -1;
	}
	if (qpti->is_pti) {
		qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
					sizeof(unsigned char),
					"PTI Qlogic/ISP statreg");
		if (!qpti->sreg) {
			printk("PTI: Qlogic/ISP status register is unmappable\n");
			return -1;
		}
	}
	return 0;
}
static int __devinit qpti_register_irq(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;

	qpti->qhost->irq = qpti->irq = op->irqs[0];

	/* We used to try various overly-clever things to
	 * reduce the interrupt processing overhead on
	 * sun4c/sun4m when multiple PTI's shared the
	 * same IRQ.  It was too complex and messy to
	 * maintain sanely.
	 */
	if (request_irq(qpti->irq, qpti_intr,
			IRQF_SHARED, "Qlogic/PTI", qpti))
		goto fail;

	printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
	return 0;

fail:
	printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
	return -1;
}
static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;
	struct device_node *dp;

	dp = op->node;

	qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
						      -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id =
			of_getintprop_default(dp->parent,
					      "scsi-initiator-id", 7);
	qpti->qhost->this_id = qpti->scsi_id;
	qpti->qhost->max_sectors = 64;

	printk("SCSI ID %d ", qpti->scsi_id);
}
static void qpti_get_bursts(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;
	u8 bursts, bmask;

	bursts = of_getintprop_default(op->node, "burst-sizes", 0xff);
	bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff);
	if (bmask != 0xff)
		bursts &= bmask;
	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	qpti->bursts = bursts;
}
static void qpti_get_clock(struct qlogicpti *qpti)
{
	unsigned int cfreq;

	/* Check for what the clock input to this card is.
	 * Default to 40MHz.
	 */
	cfreq = prom_getintdefault(qpti->prom_node, "clock-frequency", 40000000);
	qpti->clock = (cfreq + 500000)/1000000;
	if (qpti->clock == 0) /* bullshit */
		qpti->clock = 40;
}
/* The request and response queues must each be aligned
 * on a page boundary.
 */
static int __devinit qpti_map_queues(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	qpti->res_cpu = dma_alloc_coherent(&op->dev,
					   QSIZE(RES_QUEUE_LEN),
					   &qpti->res_dvma, GFP_ATOMIC);
	if (qpti->res_cpu == NULL ||
	    qpti->res_dvma == 0) {
		printk("QPTI: Cannot map response queue.\n");
		return -1;
	}

	qpti->req_cpu = dma_alloc_coherent(&op->dev,
					   QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
					   &qpti->req_dvma, GFP_ATOMIC);
	if (qpti->req_cpu == NULL ||
	    qpti->req_dvma == 0) {
		dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
				  qpti->res_cpu, qpti->res_dvma);
		printk("QPTI: Cannot map request queue.\n");
		return -1;
	}
	memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
	memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
	return 0;
}
const char *qlogicpti_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;

	sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
		qpti->qhost->irq, qpti->qregs);
	return buf;
}
/* I am a certified frobtronicist. */
static inline void marker_frob(struct Command_Entry *cmd)
{
	struct Marker_Entry *marker = (struct Marker_Entry *) cmd;

	memset(marker, 0, sizeof(struct Marker_Entry));
	marker->hdr.entry_cnt = 1;
	marker->hdr.entry_type = ENTRY_MARKER;
	marker->modifier = SYNC_ALL;
}
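
/* A marker entry (SYNC_ALL) is queued ahead of new commands whenever
 * qpti->send_marker is set, which happens after every bus reset seen by
 * the interrupt handler or requested through the mailbox; it lets the
 * firmware resynchronize before normal IOCBs resume.
 */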
static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
			    struct qlogicpti *qpti)
{
	memset(cmd, 0, sizeof(struct Command_Entry));
	cmd->hdr.entry_cnt = 1;
	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->target_id = Cmnd->device->id;
	cmd->target_lun = Cmnd->device->lun;
	cmd->cdb_length = Cmnd->cmd_len;
	cmd->control_flags = 0;
	if (Cmnd->device->tagged_supported) {
		if (qpti->cmd_count[Cmnd->device->id] == 0)
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
			cmd->control_flags = CFLAG_ORDERED_TAG;
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		} else
			cmd->control_flags = CFLAG_SIMPLE_TAG;
	}
	if ((Cmnd->cmnd[0] == WRITE_6) ||
	    (Cmnd->cmnd[0] == WRITE_10) ||
	    (Cmnd->cmnd[0] == WRITE_12))
		cmd->control_flags |= CFLAG_WRITE;
	else
		cmd->control_flags |= CFLAG_READ;
	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}
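
/* load_cmd() below finishes what cmd_frob() started: it maps the command's
 * scatterlist, places the first few elements in the Command_Entry's own
 * dataseg[] slots and spills the remainder into Continuation_Entry blocks
 * taken from the same request ring, returning -1 if the ring would wrap
 * onto the consumer pointer.
 */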
/* Do it to it baby. */
static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
{
	struct dataseg *ds;
	struct scatterlist *sg, *s;
	int i, n;

	if (scsi_bufflen(Cmnd)) {
		int sg_count;

		sg = scsi_sglist(Cmnd);
		sg_count = dma_map_sg(&qpti->op->dev, sg,
				      scsi_sg_count(Cmnd),
				      Cmnd->sc_data_direction);

		ds = cmd->dataseg;
		cmd->segment_cnt = sg_count;

		/* Fill in first four sg entries: */
		n = sg_count;
		if (n > 4)
			n = 4;
		for_each_sg(sg, s, n, i) {
			ds[i].d_base = sg_dma_address(s);
			ds[i].d_count = sg_dma_len(s);
		}
		sg_count -= 4;
		sg = s;
		while (sg_count > 0) {
			struct Continuation_Entry *cont;

			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
			in_ptr = NEXT_REQ_PTR(in_ptr);
			if (in_ptr == out_ptr)
				return -1;

			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;

			/* Continuation entries carry the rest of the sg list. */
			ds = cont->dataseg;
			n = sg_count;
			if (n > 7)
				n = 7;
			for_each_sg(sg, s, n, i) {
				ds[i].d_base = sg_dma_address(s);
				ds[i].d_count = sg_dma_len(s);
			}
			sg_count -= n;
			sg = s;
		}
	} else {
		cmd->dataseg[0].d_base = 0;
		cmd->dataseg[0].d_count = 0;
		cmd->segment_cnt = 1;	/* Shouldn't this be 0? */
	}

	/* Committed, record Scsi_Cmd so we can find it later. */
	cmd->handle = in_ptr;
	qpti->cmd_slots[in_ptr] = Cmnd;

	qpti->cmd_count[Cmnd->device->id]++;
	sbus_writew(in_ptr, qpti->qregs + MBOX4);
	qpti->req_in_ptr = in_ptr;

	return in_ptr;
}
static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
{
	/* Temporary workaround until bug is found and fixed (one bug has been found
	   already, but fixing it makes things even worse) -jj */
	int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
	host->can_queue = host->host_busy + num_free;
	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
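
/* slave_configure() below records the sync/wide settings the midlayer
 * discovered for a target in dev_param[] and pushes them to the firmware
 * via MBOX_SET_TARGET_PARAMS; tagged queueing itself is left to the
 * midlayer, as the comment inside notes.
 */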
static int qlogicpti_slave_configure(struct scsi_device *sdev)
{
	struct qlogicpti *qpti = shost_priv(sdev->host);
	int tgt = sdev->id;
	u_short param[6];

	/* tags handled in midlayer */
	/* enable sync mode? */
	if (sdev->sdtr) {
		qpti->dev_param[tgt].device_flags |= 0x10;
	} else {
		qpti->dev_param[tgt].synchronous_offset = 0;
		qpti->dev_param[tgt].synchronous_period = 0;
	}
	/* are we wide capable? */
	if (sdev->wdtr)
		qpti->dev_param[tgt].device_flags |= 0x20;

	param[0] = MBOX_SET_TARGET_PARAMS;
	param[1] = (tgt << 8);
	param[2] = (qpti->dev_param[tgt].device_flags << 8);
	if (qpti->dev_param[tgt].device_flags & 0x10) {
		param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) |
			qpti->dev_param[tgt].synchronous_period;
	} else {
		param[3] = 0;
	}
	qlogicpti_mbox_command(qpti, param, 0);
	return 0;
}
/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 *
 * "This code must fly." -davem
 */
static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	struct Command_Entry *cmd;
	u_int out_ptr;
	int in_ptr;

	Cmnd->scsi_done = done;

	in_ptr = qpti->req_in_ptr;
	cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
	out_ptr = sbus_readw(qpti->qregs + MBOX4);
	in_ptr = NEXT_REQ_PTR(in_ptr);
	if (in_ptr == out_ptr)
		goto toss_command;

	if (qpti->send_marker) {
		marker_frob(cmd);
		qpti->send_marker = 0;
		if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
			sbus_writew(in_ptr, qpti->qregs + MBOX4);
			qpti->req_in_ptr = in_ptr;
			goto toss_command;
		}
		cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
		in_ptr = NEXT_REQ_PTR(in_ptr);
	}
	cmd_frob(cmd, Cmnd, qpti);
	if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
		goto toss_command;

	update_can_queue(host, in_ptr, out_ptr);

	return 0;

toss_command:
	printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
	       qpti->qpti_id);

	/* Unfortunately, unless you use the new EH code, which
	 * we don't, the midlayer will ignore the return value,
	 * which is insane.  We pick up the pieces like this.
	 */
	Cmnd->result = DID_BUS_BUSY;
	done(Cmnd);
	return 1;
}
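
/* qlogicpti_return_status() maps the ISP's completion_status onto the
 * midlayer's DID_* host codes.  The returned value uses the standard SCSI
 * result layout: the target's status byte in the low byte and the host
 * code shifted into bits 16-23, which is what the (host_status << 16)
 * below produces.
 */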
static int qlogicpti_return_status(struct Status_Entry *sts, int id)
{
	int host_status = DID_ERROR;

	switch (sts->completion_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_INCOMPLETE:
		if (!(sts->state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(sts->state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(sts->state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;
	case CS_TRANSPORT_ERROR:
		host_status = DID_ERROR;
		break;
	case CS_RESET_OCCURRED:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_OVERRUN:
	case CS_COMMAND_OVERRUN:
	case CS_STATUS_OVERRUN:
	case CS_BAD_MESSAGE:
	case CS_NO_MESSAGE_OUT:
	case CS_EXT_ID_FAILED:
	case CS_IDE_MSG_FAILED:
	case CS_ABORT_MSG_FAILED:
	case CS_NOP_MSG_FAILED:
	case CS_PARITY_ERROR_MSG_FAILED:
	case CS_DEVICE_RESET_MSG_FAILED:
	case CS_ID_MSG_FAILED:
	case CS_UNEXP_BUS_FREE:
		host_status = DID_ERROR;
		break;
	case CS_DATA_UNDERRUN:
		host_status = DID_OK;
		break;
	default:
		printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
		       id, sts->completion_status);
		host_status = DID_ERROR;
		break;
	}

	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
}
static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
{
	struct scsi_cmnd *Cmnd, *done_queue = NULL;
	struct Status_Entry *sts;
	u_int in_ptr, out_ptr;

	if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
		return NULL;

	in_ptr = sbus_readw(qpti->qregs + MBOX5);
	sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
		switch (sbus_readw(qpti->qregs + MBOX0)) {
		case ASYNC_SCSI_BUS_RESET:
		case EXECUTION_TIMEOUT_RESET:
			qpti->send_marker = 1;
			break;
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case COMMAND_PARAM_ERROR:
			break;
		}
		sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	}

	/* This looks like a network driver! */
	out_ptr = qpti->res_out_ptr;
	while (out_ptr != in_ptr) {
		u_int cmd_slot;

		sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
		out_ptr = NEXT_RES_PTR(out_ptr);

		/* We store an index in the handle, not the pointer in
		 * some form.  This avoids problems due to the fact
		 * that the handle provided is only 32-bits. -DaveM
		 */
		cmd_slot = sts->handle;
		Cmnd = qpti->cmd_slots[cmd_slot];
		qpti->cmd_slots[cmd_slot] = NULL;

		if (sts->completion_status == CS_RESET_OCCURRED ||
		    sts->completion_status == CS_ABORTED ||
		    (sts->status_flags & STF_BUS_RESET))
			qpti->send_marker = 1;

		if (sts->state_flags & SF_GOT_SENSE)
			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
			       SCSI_SENSE_BUFFERSIZE);

		if (sts->hdr.entry_type == ENTRY_STATUS)
			Cmnd->result =
				qlogicpti_return_status(sts, qpti->qpti_id);
		else
			Cmnd->result = DID_ERROR << 16;

		if (scsi_bufflen(Cmnd))
			dma_unmap_sg(&qpti->op->dev,
				     scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
				     Cmnd->sc_data_direction);

		qpti->cmd_count[Cmnd->device->id]--;
		sbus_writew(out_ptr, qpti->qregs + MBOX5);
		Cmnd->host_scribble = (unsigned char *) done_queue;
		done_queue = Cmnd;
	}
	qpti->res_out_ptr = out_ptr;

	return done_queue;
}
static irqreturn_t qpti_intr(int irq, void *dev_id)
{
	struct qlogicpti *qpti = dev_id;
	unsigned long flags;
	struct scsi_cmnd *dq;

	spin_lock_irqsave(qpti->qhost->host_lock, flags);
	dq = qlogicpti_intr_handler(qpti);

	if (dq != NULL) {
		do {
			struct scsi_cmnd *next;

			next = (struct scsi_cmnd *) dq->host_scribble;
			dq->scsi_done(dq);
			dq = next;
		} while (dq != NULL);
	}
	spin_unlock_irqrestore(qpti->qhost->host_lock, flags);

	return IRQ_HANDLED;
}
static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;
	u32 cmd_cookie;
	int i;

	printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
	       qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);

	qlogicpti_disable_irqs(qpti);

	/* Find the 32-bit cookie we gave to the firmware for
	 * this command.
	 */
	for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
		if (qpti->cmd_slots[i] == Cmnd)
			break;
	cmd_cookie = i;

	param[0] = MBOX_ABORT;
	param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
	param[2] = cmd_cookie >> 16;
	param[3] = cmd_cookie & 0xffff;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}
static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;

	printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
	       qpti->qpti_id);

	qlogicpti_disable_irqs(qpti);

	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi bus reset failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}
static struct scsi_host_template qpti_template = {
	.module			= THIS_MODULE,
	.name			= "qlogicpti",
	.info			= qlogicpti_info,
	.queuecommand		= qlogicpti_queuecommand,
	.slave_configure	= qlogicpti_slave_configure,
	.eh_abort_handler	= qlogicpti_abort,
	.eh_bus_reset_handler	= qlogicpti_reset,
	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
	.use_clustering		= ENABLE_CLUSTERING,
};
static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
	struct scsi_host_template *tpnt = match->data;
	struct device_node *dp = op->node;
	struct Scsi_Host *host;
	struct qlogicpti *qpti;
	static int nqptis;
	const char *fcode;

	/* Sometimes Antares cards come up not completely
	 * setup, and we get a report of a zero IRQ.
	 */
	if (op->irqs[0] == 0)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
	if (!host)
		return -ENOMEM;

	qpti = shost_priv(host);

	host->max_id = MAX_TARGETS;
	qpti->qhost = host;
	qpti->op = op;
	qpti->qpti_id = nqptis;
	strcpy(qpti->prom_name, op->node->name);
	qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");

	if (qpti_map_regs(qpti) < 0)
		goto fail_unlink;

	if (qpti_register_irq(qpti) < 0)
		goto fail_unmap_regs;

	qpti_get_scsi_id(qpti);
	qpti_get_bursts(qpti);
	qpti_get_clock(qpti);

	/* Clear out scsi_cmnd array. */
	memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));

	if (qpti_map_queues(qpti) < 0)
		goto fail_free_irq;

	/* Load the firmware. */
	if (qlogicpti_load_firmware(qpti))
		goto fail_unmap_queues;
	if (qpti->is_pti) {
		/* Check the PTI status reg. */
		if (qlogicpti_verify_tmon(qpti))
			goto fail_unmap_queues;
	}

	/* Reset the ISP and init res/req queues. */
	if (qlogicpti_reset_hardware(host))
		goto fail_unmap_queues;

	printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
	       qpti->fware_minrev, qpti->fware_micrev);

	fcode = of_get_property(dp, "isp-fcode", NULL);
	if (fcode && fcode[0])
		printk("(FCode %s)", fcode);
	if (of_find_property(dp, "differential", NULL) != NULL)
		qpti->differential = 1;

	printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
	       qpti->qpti_id,
	       (qpti->ultra ? "Ultra" : "Fast"),
	       (qpti->differential ? "differential" : "single ended"));

	if (scsi_add_host(host, &op->dev)) {
		printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
		goto fail_unmap_queues;
	}

	dev_set_drvdata(&op->dev, qpti);

	qpti_chain_add(qpti);

	scsi_scan_host(host);

	nqptis++;

	return 0;

fail_unmap_queues:
#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	dma_free_coherent(&op->dev,
			  QSIZE(RES_QUEUE_LEN),
			  qpti->res_cpu, qpti->res_dvma);
	dma_free_coherent(&op->dev,
			  QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			  qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

fail_unmap_regs:
	of_iounmap(&op->resource[0], qpti->qregs,
		   resource_size(&op->resource[0]));
	if (qpti->is_pti)
		of_iounmap(&op->resource[0], qpti->sreg,
			   sizeof(unsigned char));

fail_free_irq:
	free_irq(qpti->irq, qpti);

fail_unlink:
	scsi_host_put(host);

	return -ENODEV;
}
static int __devexit qpti_sbus_remove(struct of_device *op)
{
	struct qlogicpti *qpti = dev_get_drvdata(&op->dev);

	qpti_chain_del(qpti);

	scsi_remove_host(qpti->qhost);

	/* Shut up the card. */
	sbus_writew(0, qpti->qregs + SBUS_CTRL);

	/* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
	free_irq(qpti->irq, qpti);

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	dma_free_coherent(&op->dev,
			  QSIZE(RES_QUEUE_LEN),
			  qpti->res_cpu, qpti->res_dvma);
	dma_free_coherent(&op->dev,
			  QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			  qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

	of_iounmap(&op->resource[0], qpti->qregs,
		   resource_size(&op->resource[0]));
	if (qpti->is_pti)
		of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));

	scsi_host_put(qpti->qhost);

	return 0;
}
static const struct of_device_id qpti_match[] = {
	{
		.name = "ptisp",
		.data = &qpti_template,
	},
	{
		.name = "PTI,ptisp",
		.data = &qpti_template,
	},
	{
		.name = "QLGC,isp",
		.data = &qpti_template,
	},
	{
		.name = "SUNW,isp",
		.data = &qpti_template,
	},
	{},
};
MODULE_DEVICE_TABLE(of, qpti_match);
static struct of_platform_driver qpti_sbus_driver = {
	.match_table	= qpti_match,
	.probe		= qpti_sbus_probe,
	.remove		= __devexit_p(qpti_sbus_remove),
};

static int __init qpti_init(void)
{
	return of_register_driver(&qpti_sbus_driver, &of_bus_type);
}

static void __exit qpti_exit(void)
{
	of_unregister_driver(&qpti_sbus_driver);
}

MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");

module_init(qpti_init);
module_exit(qpti_exit);