1 /* sun_esp.c: ESP front-end for Sparc SBUS systems.
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <linux/module.h>
9 #include <linux/init.h>
17 #include <scsi/scsi_host.h>
/* Driver identification strings: PFX prefixes every printk in this file,
 * DRV_VERSION feeds MODULE_VERSION at the bottom of the file.
 */
21 #define DRV_MODULE_NAME "sun_esp"
22 #define PFX DRV_MODULE_NAME ": "
23 #define DRV_VERSION "1.000"
24 #define DRV_MODULE_RELDATE "April 19, 2007"
/* DVMA controller register accessors.  These macros implicitly use a
 * variable named `esp` from the enclosing scope and address registers
 * relative to esp->dma_regs (mapped by esp_sbus_find_dma).
 */
26 #define dma_read32(REG) \
27 sbus_readl(esp->dma_regs + (REG))
28 #define dma_write32(VAL, REG) \
29 sbus_writel((VAL), esp->dma_regs + (REG))
/* Locate the DVMA companion device for this ESP and cache its register
 * base in esp->dma_regs.  If the probe code already identified the DVMA
 * node (dma_sdev != NULL), match on that exact sbus_dev; otherwise fall
 * back to matching same bus + slot with an OBP name of "dma"/"espdma".
 * NOTE(review): fragmentary listing - the dma-list iteration, success
 * jump and error return are elided here; confirm against full source.
 */
31 static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
33 struct sbus_dev *sdev = esp->dev;
36 if (dma_sdev != NULL) {
/* Exact match requested by caller. */
38 if (dma->sdev == dma_sdev)
/* Skip DVMA entries with no associated sbus_dev. */
43 if (dma->sdev == NULL)
46 /* If bus + slot are the same and it has the
47 * correct OBP name, it's ours.
49 if (sdev->bus == dma->sdev->bus &&
50 sdev->slot == dma->sdev->slot &&
51 (!strcmp(dma->sdev->prom_name, "dma") ||
52 !strcmp(dma->sdev->prom_name, "espdma")))
/* No DVMA found: report which OF node failed and bail out. */
58 printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
59 sdev->ofdev.node->full_name);
/* Found: remember the DVMA register block for dma_read32/dma_write32. */
63 esp->dma_regs = dma->regs;
/* Map the ESP chip registers into kernel virtual space (esp->regs).
 * HME boards expose two register sets (DVMA first, ESP second), so the
 * `hme` flag selects resource[1] instead of resource[0].
 * NOTE(review): fragmentary listing - the if/else and the NULL-check /
 * return statements are elided here.
 */
69 static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
71 struct sbus_dev *sdev = esp->dev;
74 /* On HME, two reg sets exist, first is DVMA,
75 * second is ESP registers.
78 res = &sdev->resource[1];
80 res = &sdev->resource[0];
82 esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
/* Allocate the 16-byte DMA-coherent command block used by the ESP core;
 * the bus address lands in esp->command_block_dma.  Freed on the probe
 * failure path and in esp_sbus_remove with sbus_free_consistent.
 */
89 static int __devinit esp_sbus_map_command_block(struct esp *esp)
91 struct sbus_dev *sdev = esp->dev;
93 esp->command_block = sbus_alloc_consistent(sdev, 16,
94 &esp->command_block_dma);
/* Allocation failure -> error return (elided in this listing). */
95 if (!esp->command_block)
/* Hook up the first SBUS interrupt line of this device to the generic
 * ESP core's handler (scsi_esp_intr).  Shared IRQ because multiple
 * devices may sit on the same SBUS interrupt.  Returns request_irq()'s
 * status (0 on success).
 */
100 static int __devinit esp_sbus_register_irq(struct esp *esp)
102 struct Scsi_Host *host = esp->host;
103 struct sbus_dev *sdev = esp->dev;
105 host->irq = sdev->irqs[0];
106 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
/* Determine our SCSI initiator ID from OpenFirmware properties, trying
 * the device node's "initiator-id", then its "scsi-initiator-id", then
 * the parent bus node's "scsi-initiator-id", defaulting to 7 (the
 * conventional host adapter ID).  0xff is the "property absent" marker
 * returned by of_getintprop_default.
 */
109 static void __devinit esp_get_scsi_id(struct esp *esp)
111 struct sbus_dev *sdev = esp->dev;
112 struct device_node *dp = sdev->ofdev.node;
114 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
115 if (esp->scsi_id != 0xff)
118 esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
119 if (esp->scsi_id != 0xff)
/* Last resort: ask the bus node, falling back to ID 7. */
128 esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
129 "scsi-initiator-id", 7);
/* Publish the result to the SCSI midlayer and precompute the bitmask. */
132 esp->host->this_id = esp->scsi_id;
133 esp->scsi_id_mask = (1 << esp->scsi_id);
/* Set or clear ESP_FLAG_DIFFERENTIAL depending on whether the OF node
 * carries a "differential" property (differential vs single-ended SCSI
 * bus electrical signalling).
 */
136 static void __devinit esp_get_differential(struct esp *esp)
138 struct sbus_dev *sdev = esp->dev;
139 struct device_node *dp = sdev->ofdev.node;
141 if (of_find_property(dp, "differential", NULL))
142 esp->flags |= ESP_FLAG_DIFFERENTIAL;
144 esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
/* Read the chip's input clock frequency ("clock-frequency") from the
 * device's own OF node, falling back to the parent bus node when the
 * device node lacks the property.
 * NOTE(review): fragmentary listing - the assignment of the result into
 * the esp structure is elided here.
 */
147 static void __devinit esp_get_clock_params(struct esp *esp)
149 struct sbus_dev *sdev = esp->dev;
150 struct device_node *dp = sdev->ofdev.node;
151 struct device_node *bus_dp;
/* Guard against a missing parent bus before dereferencing it. */
155 if (sdev != NULL && sdev->bus != NULL)
156 bus_dp = sdev->bus->ofdev.node;
158 fmhz = of_getintprop_default(dp, "clock-frequency", 0);
/* Device node had no value (0): consult the bus node if we have one. */
160 fmhz = (!bus_dp) ? 0 :
161 of_getintprop_default(bus_dp, "clock-frequency", 0);
/* Compute the supported SBUS burst sizes (esp->bursts) by intersecting
 * the "burst-sizes" properties of the ESP node, the DVMA node (when
 * present) and the parent bus node.  0xff means "property absent".
 * NOTE(review): fragmentary listing - the masking (bursts &= val) lines
 * are elided; only the property reads are visible here.
 */
166 static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
168 struct sbus_dev *sdev = esp->dev;
169 struct device_node *dp = sdev->ofdev.node;
172 bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
174 struct device_node *dma_dp = dma->ofdev.node;
175 u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
181 u8 val = of_getintprop_default(sdev->bus->ofdev.node,
182 "burst-sizes", 0xff);
/* Sanity fallback: if nothing was specified, or neither 16- nor 32-byte
 * bursts are available, allow everything below 32-byte bursts.
 */
187 if (bursts == 0xff ||
188 (bursts & DMA_BURST16) == 0 ||
189 (bursts & DMA_BURST32) == 0)
190 bursts = (DMA_BURST32 - 1);
192 esp->bursts = bursts;
/* Gather all OpenFirmware-derived configuration for this ESP in one
 * place: initiator ID, differential signalling, clock, and burst sizes.
 */
195 static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
197 esp_get_scsi_id(esp);
198 esp_get_differential(esp);
199 esp_get_clock_params(esp);
200 esp_get_bursts(esp, espdma);
/* ESP register write: registers are spaced 4 bytes apart on SBUS, hence
 * the reg * 4 address scaling.
 */
203 static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
205 sbus_writeb(val, esp->regs + (reg * 4UL));
/* ESP register read; same 4-byte register spacing as sbus_esp_write8. */
208 static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
210 return sbus_readb(esp->regs + (reg * 4UL));
/* esp_driver_ops hook: map a single buffer for DVMA via the SBUS layer. */
213 static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
216 return sbus_map_single(esp->dev, buf, sz, dir);
/* esp_driver_ops hook: map a scatterlist for DVMA via the SBUS layer. */
219 static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
222 return sbus_map_sg(esp->dev, sg, num_sg, dir);
/* esp_driver_ops hook: undo sbus_esp_map_single. */
225 static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
228 sbus_unmap_single(esp->dev, addr, sz, dir);
/* esp_driver_ops hook: undo sbus_esp_map_sg. */
231 static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
234 sbus_unmap_sg(esp->dev, sg, num_sg, dir);
/* esp_driver_ops hook: report whether the DVMA has an interrupt or
 * error condition pending in its CSR.  (The return statements are
 * elided in this listing.)
 */
237 static int sbus_esp_irq_pending(struct esp *esp)
239 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
/* Reset and reconfigure the DVMA engine according to its revision and
 * the burst sizes discovered in esp_get_bursts.  HME (dvmahme) gets the
 * full FAS366 reset dance and a cached CSR shadow (prev_hme_dmacsr);
 * older revisions get a pulse of DMA_RST_SCSI plus per-revision CSR
 * tweaks.  Always finishes by enabling DVMA interrupts.
 * NOTE(review): fragmentary listing - the switch-case labels, several
 * else branches and loop bodies are elided here.
 */
244 static void sbus_esp_reset_dma(struct esp *esp)
246 int can_do_burst16, can_do_burst32, can_do_burst64;
247 int can_do_sbus64, lim;
250 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
251 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
254 if (sbus_can_dma_64bit(esp->dev))
/* NOTE(review): this line reads esp->sdev while the line above reads
 * esp->dev for the same device handle - confirm which field is correct
 * against the full struct esp definition.
 */
256 if (sbus_can_burst64(esp->sdev))
257 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
259 /* Put the DVMA into a known state. */
260 if (esp->dma->revision != dvmahme) {
261 val = dma_read32(DMA_CSR);
262 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
263 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
265 switch (esp->dma->revision) {
/* HME: issue the FAS366 reset sequence, then build the CSR shadow. */
267 dma_write32(DMA_RESET_FAS366, DMA_CSR);
268 dma_write32(DMA_RST_SCSI, DMA_CSR);
270 esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
271 DMA_SCSI_DISAB | DMA_INT_ENAB);
273 esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
/* Pick the largest burst size the hardware advertised. */
277 esp->prev_hme_dmacsr |= DMA_BRST64;
278 else if (can_do_burst32)
279 esp->prev_hme_dmacsr |= DMA_BRST32;
/* Enable 64-bit SBUS data transfers when the bus supports them. */
282 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
283 sbus_set_sbus64(esp->dev, esp->bursts);
/* Wait (bounded by lim) for any outstanding DVMA read to complete. */
287 while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
289 printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
291 esp->host->unique_id);
/* Clear the CSR, then load our freshly built shadow value. */
297 dma_write32(0, DMA_CSR);
298 dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
300 dma_write32(0, DMA_ADDR);
/* Non-HME revisions below: per-revision clocking and burst setup. */
304 if (esp->rev != ESP100) {
305 val = dma_read32(DMA_CSR);
306 dma_write32(val | DMA_3CLKS, DMA_CSR);
311 val = dma_read32(DMA_CSR);
314 if (can_do_burst32) {
318 dma_write32(val, DMA_CSR);
322 val = dma_read32(DMA_CSR);
323 val |= DMA_ADD_ENABLE;
324 val &= ~DMA_BCNT_ENAB;
325 if (!can_do_burst32 && can_do_burst16) {
326 val |= DMA_ESC_BURST;
328 val &= ~(DMA_ESC_BURST);
330 dma_write32(val, DMA_CSR);
337 /* Enable interrupts. */
338 val = dma_read32(DMA_CSR);
339 dma_write32(val | DMA_INT_ENAB, DMA_CSR);
/* Force the DVMA FIFO to drain to memory after a transfer.  HME has no
 * drain FIFO, so it returns immediately; rev3/esc1 drain automatically
 * and only need the wait loop, other revisions must explicitly set
 * DMA_FIFO_STDRAIN first.  The wait is bounded and complains loudly on
 * timeout.  (Loop counter setup and early returns are elided here.)
 */
342 static void sbus_esp_dma_drain(struct esp *esp)
347 if (esp->dma->revision == dvmahme)
350 csr = dma_read32(DMA_CSR);
/* Nothing buffered: no drain needed. */
351 if (!(csr & DMA_FIFO_ISDRAIN))
354 if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
355 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
358 while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
360 printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
361 esp->host->unique_id);
/* Invalidate/quiesce the DVMA between commands.  On HME this is a
 * DMA_RST_SCSI pulse plus reprogramming the CSR shadow with transfer
 * bits (DMA_ST_WRITE/DMA_ENABLE) cleared; on other revisions it waits
 * out pending reads, clears the transfer-control bits and pulses
 * DMA_FIFO_INV.  (Some statements and the else branch are elided in
 * this listing.)
 */
368 static void sbus_esp_dma_invalidate(struct esp *esp)
370 if (esp->dma->revision == dvmahme) {
371 dma_write32(DMA_RST_SCSI, DMA_CSR);
/* Rebuild the CSR shadow: keep config bits, drop transfer state. */
373 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
374 (DMA_PARITY_OFF | DMA_2CLKS |
375 DMA_SCSI_DISAB | DMA_INT_ENAB)) &
376 ~(DMA_ST_WRITE | DMA_ENABLE));
378 dma_write32(0, DMA_CSR);
379 dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
381 /* This is necessary to avoid having the SCSI channel
382 * engine lock up on us.
384 dma_write32(0, DMA_ADDR);
/* Non-HME: bounded wait for pending DVMA reads to finish. */
390 while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
392 printk(KERN_ALERT PFX "esp%d: DMA will not "
393 "invalidate!\n", esp->host->unique_id);
399 val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
/* Pulse DMA_FIFO_INV: write with it set, then write with it clear. */
401 dma_write32(val, DMA_CSR);
402 val &= ~DMA_FIFO_INV;
403 dma_write32(val, DMA_CSR);
/* Program a DMA transfer and issue the ESP command.  The transfer count
 * is loaded into the chip (TCLOW/TCMED, plus FAS_RLO/RHI on FASHME for
 * the upper bits), then the DVMA is armed with address/count/CSR.  On
 * HME the command is issued before arming the DVMA; on other revisions
 * the DVMA is set up first and the command issued last.  dvmaesc1 needs
 * its byte count rounded up to a page-aligned end address.
 * NOTE(review): fragmentary listing - the HME/non-HME branch structure
 * and the write-direction conditionals are partially elided.
 */
407 static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
408 u32 dma_count, int write, u8 cmd)
/* This hook only handles DMA commands. */
412 BUG_ON(!(cmd & ESP_CMD_DMA));
414 sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
415 sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
416 if (esp->rev == FASHME) {
417 sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
418 sbus_esp_write8(esp, 0, FAS_RHI);
/* HME path: start the chip command, then arm the DVMA shadow CSR. */
420 scsi_esp_cmd(esp, cmd);
422 csr = esp->prev_hme_dmacsr;
423 csr |= DMA_SCSI_DISAB | DMA_ENABLE;
427 csr &= ~DMA_ST_WRITE;
428 esp->prev_hme_dmacsr = csr;
430 dma_write32(dma_count, DMA_COUNT);
431 dma_write32(addr, DMA_ADDR);
432 dma_write32(csr, DMA_CSR);
/* Non-HME path: arm the DVMA first... */
434 csr = dma_read32(DMA_CSR);
439 csr &= ~DMA_ST_WRITE;
440 dma_write32(csr, DMA_CSR);
441 if (esp->dma->revision == dvmaesc1) {
/* ESC1 wants the count rounded to the end of the last page. */
442 u32 end = PAGE_ALIGN(addr + dma_count + 16U);
443 dma_write32(end - addr, DMA_COUNT);
445 dma_write32(addr, DMA_ADDR);
/* ...then kick off the chip command. */
447 scsi_esp_cmd(esp, cmd);
/* esp_driver_ops hook: report whether the DVMA has latched an error
 * (DMA_HNDL_ERROR in the CSR).  Return statements are elided in this
 * listing.
 */
452 static int sbus_esp_dma_error(struct esp *esp)
454 u32 csr = dma_read32(DMA_CSR);
456 if (csr & DMA_HNDL_ERROR)
/* Vtable wiring the bus-independent esp_scsi core to the SBUS-specific
 * accessors and DVMA handlers defined above.
 */
462 static const struct esp_driver_ops sbus_esp_ops = {
463 .esp_write8 = sbus_esp_write8,
464 .esp_read8 = sbus_esp_read8,
465 .map_single = sbus_esp_map_single,
466 .map_sg = sbus_esp_map_sg,
467 .unmap_single = sbus_esp_unmap_single,
468 .unmap_sg = sbus_esp_unmap_sg,
469 .irq_pending = sbus_esp_irq_pending,
470 .reset_dma = sbus_esp_reset_dma,
471 .dma_drain = sbus_esp_dma_drain,
472 .dma_invalidate = sbus_esp_dma_invalidate,
473 .send_dma_cmd = sbus_esp_send_dma_cmd,
474 .dma_error = sbus_esp_dma_error,
/* Bring up one ESP instance: allocate the Scsi_Host, find the DVMA,
 * map registers and the command block, register the IRQ, read OF
 * properties, clear any stuck ESC1 reset bit, then hand off to the
 * esp_scsi core via scsi_esp_register.  Failure paths unwind in
 * reverse order using goto labels (kernel idiom).
 * NOTE(review): fragmentary listing - the err checks between steps,
 * some labels and the final return are elided.
 */
477 static int __devinit esp_sbus_probe_one(struct device *dev,
478 struct sbus_dev *esp_dev,
479 struct sbus_dev *espdma,
480 struct sbus_bus *sbus,
483 struct scsi_host_template *tpnt = &scsi_esp_template;
484 struct Scsi_Host *host;
488 host = scsi_host_alloc(tpnt, sizeof(struct esp));
/* FAS/HME chips support 16 target IDs (wide SCSI), classic ESP 8. */
494 host->max_id = (hme ? 16 : 8);
495 esp = host_to_esp(host);
499 esp->ops = &sbus_esp_ops;
502 esp->flags |= ESP_FLAG_WIDE_CAPABLE;
504 err = esp_sbus_find_dma(esp, espdma);
508 err = esp_sbus_map_regs(esp, hme);
512 err = esp_sbus_map_command_block(esp);
514 goto fail_unmap_regs;
516 err = esp_sbus_register_irq(esp);
518 goto fail_unmap_command_block;
520 esp_sbus_get_props(esp, espdma);
522 /* Before we try to touch the ESP chip, ESC1 dma can
523 * come up with the reset bit set, so make sure that
526 if (esp->dma->revision == dvmaesc1) {
527 u32 val = dma_read32(DMA_CSR);
529 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
/* Stash the esp pointer for esp_sbus_remove. */
532 dev_set_drvdata(&esp_dev->ofdev.dev, esp);
534 err = scsi_esp_register(esp, dev);
/* Failure unwinding, in reverse order of acquisition: */
541 free_irq(host->irq, esp);
542 fail_unmap_command_block:
543 sbus_free_consistent(esp->dev, 16,
545 esp->command_block_dma);
547 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
/* of_platform probe entry point.  Figures out the DVMA topology: when
 * the device sits under an "espdma"/"dma" parent node, that parent is
 * the DVMA companion; "SUNW,fas" devices are handled in the (elided)
 * else branch.  Delegates the real work to esp_sbus_probe_one.
 */
554 static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
556 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
557 struct device_node *dp = dev->node;
558 struct sbus_dev *dma_sdev = NULL;
562 (!strcmp(dp->parent->name, "espdma") ||
563 !strcmp(dp->parent->name, "dma")))
564 dma_sdev = sdev->parent;
565 else if (!strcmp(dp->name, "SUNW,fas")) {
570 return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
/* of_platform remove entry point: detach from the esp_scsi core, mask
 * DVMA interrupts, then release the IRQ (elided), the coherent command
 * block, the register mapping, and finally the Scsi_Host reference.
 * Teardown is the mirror image of esp_sbus_probe_one.
 */
574 static int __devexit esp_sbus_remove(struct of_device *dev)
576 struct esp *esp = dev_get_drvdata(&dev->dev);
/* Cache the irq before the host may be torn down. */
577 unsigned int irq = esp->host->irq;
580 scsi_esp_unregister(esp);
582 /* Disable interrupts. */
583 val = dma_read32(DMA_CSR);
584 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
587 sbus_free_consistent(esp->dev, 16,
589 esp->command_block_dma);
590 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
592 scsi_host_put(esp->host);
/* OF compatible/name match table binding this driver to ESP/FAS nodes
 * (entries elided in this listing); exported for module autoloading.
 */
597 static struct of_device_id esp_match[] = {
609 MODULE_DEVICE_TABLE(of, esp_match);
/* of_platform driver definition tying the match table to the
 * probe/remove callbacks above.
 */
611 static struct of_platform_driver esp_sbus_driver = {
613 .match_table = esp_match,
614 .probe = esp_sbus_probe,
615 .remove = __devexit_p(esp_sbus_remove),
/* Module init: register the driver on the SBUS bus type. */
618 static int __init sunesp_init(void)
620 return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
/* Module exit: unregister the driver (triggers esp_sbus_remove). */
623 static void __exit sunesp_exit(void)
625 of_unregister_driver(&esp_sbus_driver);
/* Standard module metadata and entry-point registration. */
628 MODULE_DESCRIPTION("Sun ESP SCSI driver");
629 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
630 MODULE_LICENSE("GPL");
631 MODULE_VERSION(DRV_VERSION);
633 module_init(sunesp_init);
634 module_exit(sunesp_exit);