#include <linux/types.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <linux/zorro.h>
#include <asm/irq.h>
#include <linux/spinlock.h>

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "wd33c93.h"
#include "a2091.h"

#include <linux/stat.h>
#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
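/*
 * DMA() maps a host's base address onto the board's DMA/SCSI register
 * block (a2091_scsiregs) and HDATA() returns the wd33c93 per-host data
 * kept in the Scsi_Host's hostdata area.
 */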
static irqreturn_t a2091_intr(int irq, void *_instance, struct pt_regs *fp)
{
    unsigned long flags;
    unsigned int status;
    struct Scsi_Host *instance = (struct Scsi_Host *)_instance;

    status = DMA(instance)->ISTR;
    if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
        return IRQ_NONE;
    spin_lock_irqsave(instance->host_lock, flags);
    wd33c93_intr(instance);
    spin_unlock_irqrestore(instance->host_lock, flags);
    return IRQ_HANDLED;
}
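/*
 * Set up a DMA transfer for the wd33c93 core.  The controller can only do
 * DMA to addresses that pass the A2091_XFER_MASK check (the buffer must
 * live in the first 16M of physical memory), so transfers outside that
 * window are staged through a kmalloc'd bounce buffer.  A nonzero return
 * tells the core to fall back to PIO.
 */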
static int dma_setup(Scsi_Cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);
    struct Scsi_Host *instance = cmd->device->host;

    /* don't allow DMA if the physical address is bad */
    if (addr & A2091_XFER_MASK ||
        (!dir_in && mm_end_of_chunk(addr, cmd->SCp.this_residual))) {
        HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
            & ~0x1ff;
        HDATA(instance)->dma_bounce_buffer =
            kmalloc(HDATA(instance)->dma_bounce_len, GFP_KERNEL);

        /* can't allocate memory; use PIO */
        if (!HDATA(instance)->dma_bounce_buffer) {
            HDATA(instance)->dma_bounce_len = 0;
            return 1;
        }

        /* get the physical address of the bounce buffer */
        addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);

        /* the bounce buffer may not be in the first 16M of physmem */
        if (addr & A2091_XFER_MASK) {
            /* we could use chipmem... maybe later */
            kfree(HDATA(instance)->dma_bounce_buffer);
            HDATA(instance)->dma_bounce_buffer = NULL;
            HDATA(instance)->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            if (cmd->use_sg)
#if 0
                panic("scsi%ddma: incomplete s/g support",
                      instance->host_no);
#else
                memcpy(HDATA(instance)->dma_bounce_buffer,
                       cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
            else
                memcpy(HDATA(instance)->dma_bounce_buffer,
                       cmd->request_buffer, cmd->request_bufflen);
        }
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(cmd->device->host)->dma_dir = dir_in;

    DMA(cmd->device->host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(cmd->device->host)->ACR = addr;

    if (dir_in) {
        /* invalidate any cache */
        cache_clear(addr, cmd->SCp.this_residual);
    } else {
        /* push any dirty cache */
        cache_push(addr, cmd->SCp.this_residual);
    }

    /* start DMA */
    DMA(cmd->device->host)->ST_DMA = 1;

    /* return success */
    return 0;
}
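/*
 * Called by the wd33c93 core when a transfer completes (or is aborted):
 * disable and flush the DMA channel, stop it, and, if a bounce buffer was
 * used for a read, copy the data back into the command's real buffer
 * before freeing the bounce buffer.
 */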
static void dma_stop(struct Scsi_Host *instance, Scsi_Cmnd *SCpnt,
                     int status)
{
    /* prepare a CNTR value with interrupts disabled */
    unsigned short cntr = CNTR_PDMD;

    if (!HDATA(instance)->dma_dir)
        cntr |= CNTR_DDIR;

    /* disable SCSI interrupts */
    DMA(instance)->CNTR = cntr;

    /* flush if we were reading */
    if (HDATA(instance)->dma_dir) {
        DMA(instance)->FLUSH = 1;
        while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
            ;
    }

    /* clear a possible interrupt */
    DMA(instance)->CINT = 1;

    /* stop DMA */
    DMA(instance)->SP_DMA = 1;

    /* restore the CONTROL bits (minus the direction flag) */
    DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;

    /* copy from a bounce buffer, if necessary */
    if (status && HDATA(instance)->dma_bounce_buffer) {
        if (SCpnt && SCpnt->use_sg) {
#if 0
            panic("scsi%d: incomplete s/g support",
                  instance->host_no);
#else
            if (HDATA(instance)->dma_dir)
                memcpy(SCpnt->SCp.ptr,
                       HDATA(instance)->dma_bounce_buffer,
                       SCpnt->SCp.this_residual);
            kfree(HDATA(instance)->dma_bounce_buffer);
            HDATA(instance)->dma_bounce_buffer = NULL;
            HDATA(instance)->dma_bounce_len = 0;
#endif
        } else {
            if (HDATA(instance)->dma_dir && SCpnt)
                memcpy(SCpnt->request_buffer,
                       HDATA(instance)->dma_bounce_buffer,
                       SCpnt->request_bufflen);
            kfree(HDATA(instance)->dma_bounce_buffer);
            HDATA(instance)->dma_bounce_buffer = NULL;
            HDATA(instance)->dma_bounce_len = 0;
        }
    }
}
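/*
 * Probe the Zorro bus for A590/A2091 boards.  For each board found the
 * register window is claimed, a Scsi_Host is registered, the wd33c93 core
 * is initialised with our dma_setup/dma_stop hooks, and the shared Amiga
 * PORTS interrupt is requested.
 */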
int __init a2091_detect(Scsi_Host_Template *tpnt)
{
    static unsigned char called = 0;
    struct Scsi_Host *instance;
    unsigned long address;
    struct zorro_dev *z = NULL;
    wd33c93_regs regs;
    int num_a2091 = 0;

    if (!MACH_IS_AMIGA || called)
        return 0;
    called = 1;

    tpnt->proc_name = "A2091";
    tpnt->proc_info = &wd33c93_proc_info;

    while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
        if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
            z->id != ZORRO_PROD_CBM_A590_A2091_2)
            continue;
        address = z->resource.start;
        if (!request_mem_region(address, 256, "wd33c93"))
            continue;

        instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
        if (instance == NULL) {
            release_mem_region(address, 256);
            continue;
        }
        instance->base = ZTWO_VADDR(address);
        instance->irq = IRQ_AMIGA_PORTS;
        instance->unique_id = z->slotaddr;
        DMA(instance)->DAWR = DAWR_A2091;
        regs.SASR = &(DMA(instance)->SASR);
        regs.SCMD = &(DMA(instance)->SCMD);
        wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
        request_irq(IRQ_AMIGA_PORTS, a2091_intr, SA_SHIRQ, "A2091 SCSI",
                    instance);
        DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
        num_a2091++;
    }

    return num_a2091;
}
static int a2091_bus_reset(Scsi_Cmnd *cmd)
{
    /* FIXME perform bus-specific reset */
    wd33c93_host_reset(cmd);
    return SUCCESS;
}
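/*
 * The host template below wires the mid-layer entry points almost directly
 * to the wd33c93 core; only detection, release and the bus reset wrapper
 * are board specific.
 */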
static Scsi_Host_Template driver_template = {
    .proc_name = "A2091",
    .name = "Commodore A2091/A590 SCSI",
    .detect = a2091_detect,
    .release = a2091_release,
    .queuecommand = wd33c93_queuecommand,
    .eh_abort_handler = wd33c93_abort,
    .eh_bus_reset_handler = a2091_bus_reset,
    .eh_host_reset_handler = wd33c93_host_reset,
    .can_queue = CAN_QUEUE,
    .this_id = 7,
    .sg_tablesize = SG_ALL,
    .cmd_per_lun = CMD_PER_LUN,
    .use_clustering = DISABLE_CLUSTERING
};
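/*
 * scsi_module.c provides the generic module init/exit glue for old-style
 * detect()/release() host templates such as this one.
 */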
#include "scsi_module.c"
int a2091_release(struct Scsi_Host *instance)
{
    DMA(instance)->CNTR = 0;
    release_mem_region(ZTWO_PADDR(instance->base), 256);
    free_irq(IRQ_AMIGA_PORTS, instance);
    return 1;
}

MODULE_LICENSE("GPL");