/*
 * sata_svw.c - ServerWorks / Apple K2 SATA
 *
 * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
 *                Jeff Garzik <jgarzik@pobox.com>
 *                Please ALWAYS copy linux-ide@vger.kernel.org
 *                on emails.
 *
 * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *
 * Bits from Jeff Garzik, Copyright RedHat, Inc.
 *
 * This driver probably works with non-Apple versions of the
 * Broadcom chipset...
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available under NDA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif /* CONFIG_PPC_OF */
#define DRV_NAME	"sata_svw"
#define DRV_VERSION	"2.0"
enum {
	/* Taskfile registers offsets */
	K2_SATA_TF_CMD_OFFSET = 0x00,
	K2_SATA_TF_DATA_OFFSET = 0x00,
	K2_SATA_TF_ERROR_OFFSET = 0x04,
	K2_SATA_TF_NSECT_OFFSET = 0x08,
	K2_SATA_TF_LBAL_OFFSET = 0x0c,
	K2_SATA_TF_LBAM_OFFSET = 0x10,
	K2_SATA_TF_LBAH_OFFSET = 0x14,
	K2_SATA_TF_DEVICE_OFFSET = 0x18,
	K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
	K2_SATA_TF_CTL_OFFSET = 0x20,

	/* DMA base */
	K2_SATA_DMA_CMD_OFFSET = 0x30,

	/* SCRs base within port */
	K2_SATA_SCR_STATUS_OFFSET = 0x40,
	K2_SATA_SCR_ERROR_OFFSET = 0x44,
	K2_SATA_SCR_CONTROL_OFFSET = 0x48,

	/* Others */
	K2_SATA_SICR1_OFFSET = 0x80,
	K2_SATA_SICR2_OFFSET = 0x84,
	K2_SATA_SIM_OFFSET = 0x88,

	/* Port stride */
	K2_SATA_PORT_OFFSET = 0x100,
};
static u8 k2_stat_check_status(struct ata_port *ap);
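
/* The SCRs (SStatus, SError, SControl) are plain memory-mapped registers
 * spaced 4 bytes apart from scr_addr, so the accessors below simply index
 * scr_addr by sc_reg * 4.
 */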
static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;
	return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
			       u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;
	writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
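
/* Taskfile registers are memory-mapped 4 bytes apart and can be written as
 * 16-bit words; for LBA48 commands the HOB (high-order) byte is carried in
 * the upper half of the same write.
 */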
static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
		writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
		writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
		writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
		writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
	} else if (is_addr) {
		writew(tf->feature, ioaddr->feature_addr);
		writew(tf->nsect, ioaddr->nsect_addr);
		writew(tf->lbal, ioaddr->lbal_addr);
		writew(tf->lbam, ioaddr->lbam_addr);
		writew(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		writeb(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
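
/* Mirror of k2_sata_tf_load(): each taskfile register is read as a 16-bit
 * word, with the current value in the low byte and, for LBA48, the HOB
 * value in the high byte.
 */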
static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u16 nsect, lbal, lbam, lbah, feature;

	tf->command = k2_stat_check_status(ap);
	tf->device = readw(ioaddr->device_addr);
	feature = readw(ioaddr->error_addr);
	nsect = readw(ioaddr->nsect_addr);
	lbal = readw(ioaddr->lbal_addr);
	lbam = readw(ioaddr->lbam_addr);
	lbah = readw(ioaddr->lbah_addr);

	tf->feature = feature;
	tf->nsect = nsect;
	tf->lbal = lbal;
	tf->lbam = lbam;
	tf->lbah = lbah;

	if (tf->flags & ATA_TFLAG_LBA48) {
		tf->hob_feature = feature >> 8;
		tf->hob_nsect = nsect >> 8;
		tf->hob_lbal = lbal >> 8;
		tf->hob_lbam = lbam >> 8;
		tf->hob_lbah = lbah >> 8;
	}
}

/**
 *	k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void *mmio = (void *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command if this is not an ATA DMA command */
	if (qc->tf.protocol != ATA_PROT_DMA)
		ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void *mmio = (void *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
	/* This works around a race condition seen in certain SATA controllers
	   when the r/w command is issued to the controller before host DMA is
	   started.  On a read command, the controller would initiate the
	   command to the drive even before it sees the DMA start.  With very
	   fast drives, or when the request hits the drive cache, the drive may
	   return part or all of the requested data before the DMA start is
	   issued.  The controller then becomes confused about what to do with
	   the data.  In the worst case, when all the data is returned to the
	   controller, the controller could hang; in other cases it returns
	   partial data, resulting in data corruption.  The problem has been
	   seen on PPC systems and can also appear on any system with very fast
	   disks where the SATA controller sits behind a number of bridges, so
	   there is significant latency between the r/w command and the start
	   command.  Hence, issue the r/w command only after DMA has started. */
	/* issue r/w command if the access is to ATA */
	if (qc->tf.protocol == ATA_PROT_DMA)
		ap->ops->exec_command(ap, &qc->tf);
}
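
/* The ATA status register sits in the low byte of a 32-bit MMIO word,
 * hence the readl() rather than a byte access.
 */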
static u8 k2_stat_check_status(struct ata_port *ap)
{
	return readl((void *) ap->ioaddr.status_addr);
}

#ifdef CONFIG_PPC_OF
/*
 * k2_sata_proc_info
 * inout : decides on the direction of the dataflow and the meaning of the
 *         variables
 * buffer: If inout==FALSE data is being written to it else read from it
 * *start: If inout==FALSE start of the valid data in the buffer
 * offset: If inout==FALSE offset from the beginning of the imaginary file
 *         from which we start writing into the buffer
 * length: If inout==FALSE max number of bytes to be written into the buffer
 *         else number of bytes in the buffer
 */
static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
			     off_t offset, int count, int inout)
{
	struct ata_port *ap;
	struct device_node *np;
	int len, index;

	/* Find the ata_port */
	ap = ata_shost_to_port(shost);
	if (ap == NULL)
		return 0;

	/* Find the OF node for the PCI device proper */
	np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
	if (np == NULL)
		return 0;

	/* Match it to a port node */
	index = (ap == ap->host_set->ports[0]) ? 0 : 1;
	for (np = np->child; np != NULL; np = np->sibling) {
		u32 *reg = (u32 *)get_property(np, "reg", NULL);
		if (!reg)
			continue;
		if (index == *reg)
			break;
	}
	if (np == NULL)
		return 0;

	len = sprintf(page, "devspec: %s\n", np->full_name);
	return len;
}
#endif /* CONFIG_PPC_OF */

static struct scsi_host_template k2_sata_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
#ifdef CONFIG_PPC_OF
	.proc_info = k2_sata_proc_info,
#endif
	.bios_param = ata_std_bios_param,
};

static const struct ata_port_operations k2_sata_ops = {
	.port_disable = ata_port_disable,
	.tf_load = k2_sata_tf_load,
	.tf_read = k2_sata_tf_read,
	.check_status = k2_stat_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = k2_bmdma_setup_mmio,
	.bmdma_start = k2_bmdma_start_mmio,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.data_xfer = ata_mmio_data_xfer,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = ata_bmdma_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = k2_sata_scr_read,
	.scr_write = k2_sata_scr_write,
	.port_start = ata_port_start,
	.port_stop = ata_port_stop,
	.host_stop = ata_pci_host_stop,
};
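
/* Fill in the MMIO addresses of one port; all of its registers live in a
 * flat window starting at 'base', using the offsets defined above.
 */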
static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
	port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
	port->feature_addr =
	port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
	port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
	port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
	port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
	port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
	port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
	port->command_addr =
	port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
	port->altstatus_addr =
	port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
	port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
	port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
}

static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	unsigned long base;
	void __iomem *mmio_base;
	int pci_dev_busy = 0;
	int rc;
	int i;
	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
	/*
	 * If this driver happens to only be useful on Apple's K2, then
	 * we should check that here as it has a normal Serverworks ID
	 */
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/*
	 * Check if we have resources mapped at all (second function may
	 * have been disabled by firmware)
	 */
	if (pci_resource_len(pdev, 5) == 0)
		return -ENODEV;
	/* Request PCI regions */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);
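
	/* All port registers live in the BAR 5 MMIO window */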
	mmio_base = pci_iomap(pdev, 5, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}
	base = (unsigned long) mmio_base;
	/* Clear a magic bit in SCR1 according to Darwin; this helps some
	 * funky Seagate drives (though so far, it was already set by the
	 * firmware on the machines I had access to)
	 */
	writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
	       mmio_base + K2_SATA_SICR1_OFFSET);
	/* Clear SATA error & interrupts we don't use */
	writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
	writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
	probe_ent->sht = &k2_sata_sht;
	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				ATA_FLAG_MMIO;
	probe_ent->port_ops = &k2_sata_ops;
	probe_ent->n_ports = 4;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;
	/* We don't care much about the PIO/UDMA masks, but the core won't
	 * like us if we don't fill these
	 */
	probe_ent->pio_mask = 0x1f;
	probe_ent->mwdma_mask = 0x7;
	probe_ent->udma_mask = 0x7f;
	/* Different controllers have a different number of ports - currently 4 or 8 */
	/* All ports are on the same function.  The multi-function variant is
	 * no longer available, so this should not be seen in any system. */
	for (i = 0; i < ent->driver_data; i++)
		k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
	pci_set_master(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}

/* 0x240 is device ID for Apple K2 device
 * 0x241 is device ID for Serverworks Frodo4
 * 0x242 is device ID for Serverworks Frodo8
 * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
 */
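/* The last entry field (driver_data) is the number of ports on the chip and
 * is used as the port-setup loop bound in k2_sata_init_one().
 */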
static const struct pci_device_id k2_sata_pci_tbl[] = {
	{ 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ }
};

static struct pci_driver k2_sata_pci_driver = {
	.name = DRV_NAME,
	.id_table = k2_sata_pci_tbl,
	.probe = k2_sata_init_one,
	.remove = ata_pci_remove_one,
};

static int __init k2_sata_init(void)
{
	return pci_module_init(&k2_sata_pci_driver);
}

static void __exit k2_sata_exit(void)
{
	pci_unregister_driver(&k2_sata_pci_driver);
}

MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(k2_sata_init);
module_exit(k2_sata_exit);