#include <linux/types.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <linux/zorro.h>
#include <asm/irq.h>
#include <linux/spinlock.h>

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "wd33c93.h"
#include "a2091.h"

#include <linux/stat.h>
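
/*
 * Convenience accessors: DMA() maps a host's base address onto the
 * board's DMA/SCSI register block, HDATA() onto the wd33c93 per-host
 * data kept in the Scsi_Host's hostdata area.
 */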
#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
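
/*
 * Interrupt handler.  The PORTS interrupt is shared, so check the
 * board's ISTR first and return IRQ_NONE if this interrupt is not
 * ours; otherwise run the generic wd33c93 handler under the host lock.
 */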
static irqreturn_t a2091_intr(int irq, void *_instance, struct pt_regs *fp)
{
    unsigned long flags;
    unsigned int status;
    struct Scsi_Host *instance = (struct Scsi_Host *)_instance;

    status = DMA(instance)->ISTR;
    if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
        return IRQ_NONE;
    spin_lock_irqsave(instance->host_lock, flags);
    wd33c93_intr(instance);
    spin_unlock_irqrestore(instance->host_lock, flags);
    return IRQ_HANDLED;
}
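
/*
 * Set up a DMA transfer.  The controller can only address the low
 * 16 MB of physical memory, so buffers outside that range (and write
 * buffers that cross a memory chunk boundary) are routed through a
 * kmalloc'd bounce buffer; returning 1 tells the wd33c93 core to fall
 * back to PIO instead.
 */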
static int dma_setup(Scsi_Cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);
    struct Scsi_Host *instance = cmd->device->host;

    /* don't allow DMA if the physical address is bad */
    if (addr & A2091_XFER_MASK ||
        (!dir_in && mm_end_of_chunk(addr, cmd->SCp.this_residual))) {
        /* bounce buffer, rounded up to a multiple of 512 bytes */
        HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
            & ~0x1ff;
        HDATA(instance)->dma_bounce_buffer =
            kmalloc(HDATA(instance)->dma_bounce_len, GFP_KERNEL);

        /* can't allocate memory; use PIO */
        if (!HDATA(instance)->dma_bounce_buffer) {
            HDATA(instance)->dma_bounce_len = 0;
            return 1;
        }

        /* get the physical address of the bounce buffer */
        addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);

        /* the bounce buffer may not be in the first 16M of physmem */
        if (addr & A2091_XFER_MASK) {
            /* we could use chipmem... maybe later */
            kfree(HDATA(instance)->dma_bounce_buffer);
            HDATA(instance)->dma_bounce_buffer = NULL;
            HDATA(instance)->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            if (cmd->use_sg)
#if 0
                panic("scsi%ddma: incomplete s/g support",
                      instance->host_no);
#else
                memcpy(HDATA(instance)->dma_bounce_buffer,
                       cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
            else
                memcpy(HDATA(instance)->dma_bounce_buffer,
                       cmd->request_buffer, cmd->request_bufflen);
        }
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(cmd->device->host)->dma_dir = dir_in;

    DMA(cmd->device->host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(cmd->device->host)->ACR = addr;

    if (dir_in) {
        /* invalidate any cache */
        cache_clear(addr, cmd->SCp.this_residual);
    } else {
        /* push any dirty cache */
        cache_push(addr, cmd->SCp.this_residual);
    }

    /* start DMA */
    DMA(cmd->device->host)->ST_DMA = 1;

    /* return success */
    return 0;
}
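
/*
 * Finish a DMA transfer: mask the interrupt enable, drain the FIFO
 * after a read, clear any pending interrupt, stop the DMA engine and
 * restore CNTR.  On success, a read that went through a bounce buffer
 * is copied back to the command's buffer before the buffer is freed.
 */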
static void dma_stop(struct Scsi_Host *instance, Scsi_Cmnd *SCpnt,
                     int status)
{
    /* disable SCSI interrupts */
    unsigned short cntr = CNTR_PDMD;

    if (!HDATA(instance)->dma_dir)
        cntr |= CNTR_DDIR;

    /* disable SCSI interrupts */
    DMA(instance)->CNTR = cntr;

    /* flush if we were reading */
    if (HDATA(instance)->dma_dir) {
        DMA(instance)->FLUSH = 1;
        while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
            ;
    }

    /* clear a possible interrupt */
    DMA(instance)->CINT = 1;

    /* stop DMA */
    DMA(instance)->SP_DMA = 1;

    /* restore the CONTROL bits (minus the direction flag) */
    DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;

    /* copy from a bounce buffer, if necessary */
    if (status && HDATA(instance)->dma_bounce_buffer) {
        if (SCpnt && SCpnt->use_sg) {
#if 0
            panic("scsi%d: incomplete s/g support",
                  instance->host_no);
#else
            if (HDATA(instance)->dma_dir)
                memcpy(SCpnt->SCp.ptr,
                       HDATA(instance)->dma_bounce_buffer,
                       SCpnt->SCp.this_residual);
            kfree(HDATA(instance)->dma_bounce_buffer);
            HDATA(instance)->dma_bounce_buffer = NULL;
            HDATA(instance)->dma_bounce_len = 0;
#endif
        } else {
            if (HDATA(instance)->dma_dir && SCpnt)
                memcpy(SCpnt->request_buffer,
                       HDATA(instance)->dma_bounce_buffer,
                       SCpnt->request_bufflen);
            kfree(HDATA(instance)->dma_bounce_buffer);
            HDATA(instance)->dma_bounce_buffer = NULL;
            HDATA(instance)->dma_bounce_len = 0;
        }
    }
}
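
/*
 * Probe the Zorro bus for A590/A2091 boards.  Each hit gets 256 bytes
 * of its address space reserved, a Scsi_Host registered, the wd33c93
 * core initialised with the board's SASR/SCMD registers, and the
 * shared PORTS interrupt installed.  Returns the number of boards found.
 */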
int __init a2091_detect(struct scsi_host_template *tpnt)
{
    static unsigned char called = 0;
    struct Scsi_Host *instance;
    unsigned long address;
    struct zorro_dev *z = NULL;
    wd33c93_regs regs;
    int num_a2091 = 0;

    if (!MACH_IS_AMIGA || called)
        return 0;
    called = 1;

    tpnt->proc_name = "A2091";
    tpnt->proc_info = &wd33c93_proc_info;

    while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
        if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
            z->id != ZORRO_PROD_CBM_A590_A2091_2)
            continue;
        address = z->resource.start;
        if (!request_mem_region(address, 256, "wd33c93"))
            continue;

        instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
        if (instance == NULL) {
            release_mem_region(address, 256);
            continue;
        }
        instance->base = ZTWO_VADDR(address);
        instance->irq = IRQ_AMIGA_PORTS;
        instance->unique_id = z->slotaddr;
        DMA(instance)->DAWR = DAWR_A2091;
        regs.SASR = &(DMA(instance)->SASR);
        regs.SCMD = &(DMA(instance)->SCMD);
        wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
        request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
                    instance);
        DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
        num_a2091++;
    }

    return num_a2091;
}
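
/*
 * Bus reset handler: currently just a locked call into the generic
 * wd33c93 host reset (see the FIXMEs below).
 */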
static int a2091_bus_reset(Scsi_Cmnd *cmd)
{
    /* FIXME perform bus-specific reset */

    /* FIXME 2: kill this function, and let midlayer fall back
       to the same action, calling wd33c93_host_reset() */

    spin_lock_irq(cmd->device->host->host_lock);
    wd33c93_host_reset(cmd);
    spin_unlock_irq(cmd->device->host->host_lock);

    return SUCCESS;
}

static struct scsi_host_template driver_template = {
    .proc_name = "A2091",
    .name = "Commodore A2091/A590 SCSI",
    .detect = a2091_detect,
    .release = a2091_release,
    .queuecommand = wd33c93_queuecommand,
    .eh_abort_handler = wd33c93_abort,
    .eh_bus_reset_handler = a2091_bus_reset,
    .eh_host_reset_handler = wd33c93_host_reset,
    .can_queue = CAN_QUEUE,
    .this_id = 7,
    .sg_tablesize = SG_ALL,
    .cmd_per_lun = CMD_PER_LUN,
    .use_clustering = DISABLE_CLUSTERING
};
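
/* old-style host registration: scsi_module.c provides the module
   init/exit glue that drives driver_template above */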
#include "scsi_module.c"
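
/* called on driver release (module unload): quiesce the board and
   give back its interrupt and address space */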
int a2091_release(struct Scsi_Host *instance)
{
    /* stop the board's DMA engine and interrupts before letting go */
    DMA(instance)->CNTR = 0;
    release_mem_region(ZTWO_PADDR(instance->base), 256);
    free_irq(IRQ_AMIGA_PORTS, instance);
    return 1;
}

MODULE_LICENSE("GPL");