/*
 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
 *
 *	Alan Cox <alan@redhat.com>
 *	(C) 2007 Bartlomiej Zolnierkiewicz
 *
 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
 *
 * First cut with LBA48/ATAPI
 *
 * TODO:
 *	Channel interlock/reset on both required ?
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_pdc202xx_old"
#define DRV_VERSION "0.4.2"

static int pdc2026x_cable_detect(struct ata_port *ap)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u16 cis;

        pci_read_config_word(pdev, 0x50, &cis);
        if (cis & (1 << (10 + ap->port_no)))
                return ATA_CBL_PATA80;
        return ATA_CBL_PATA40;
}

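/*
 * Note: the test above treats PCI config word 0x50 as the cable sense
 * register; bit 10 set means an 80-wire cable on the primary channel
 * and bit 11 on the secondary, hence the shift by (10 + port_no).
 */
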
/**
 *	pdc202xx_configure_piomode	-	set chip PIO timing
 *	@ap: ATA interface
 *	@adev: ATA device
 *	@pio: PIO mode
 *
 *	Called to do the PIO mode setup. Our timing registers are shared
 *	so a configure_dmamode call will undo any work we do here and vice
 *	versa.
 */

static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
        static u16 pio_timing[5] = {
                0x0913, 0x050C, 0x0308, 0x0206, 0x0104
        };
        u8 r_ap, r_bp;

        pci_read_config_byte(pdev, port, &r_ap);
        pci_read_config_byte(pdev, port + 1, &r_bp);
        r_ap &= ~0x3F;  /* Preserve ERRDY_EN, SYNC_IN */
        r_bp &= ~0x1F;
        r_ap |= (pio_timing[pio] >> 8);
        r_bp |= (pio_timing[pio] & 0xFF);

        if (ata_pio_need_iordy(adev))
                r_ap |= 0x20;   /* IORDY enable */
        if (adev->class == ATA_DEV_ATA)
                r_ap |= 0x10;   /* FIFO enable */
        pci_write_config_byte(pdev, port, r_ap);
        pci_write_config_byte(pdev, port + 1, r_bp);
}

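/*
 * Note: each device has a small block of timing registers at PCI config
 * offset 0x60 + 8 * channel + 4 * device.  PIO timing uses registers A
 * and B (port, port + 1); set_dmamode below shares register B and also
 * touches register C (port + 2), which is why the PIO and DMA paths can
 * overwrite each other's settings.
 */
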
/**
 *	pdc202xx_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Called to do the PIO mode setup. Our timing registers are shared
 *	but we want to set the PIO timing by default.
 */

static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}

/**
 *	pdc202xx_set_dmamode	-	set DMA mode in chip
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Load DMA cycle times into the chip ready for a DMA transfer
 *	to occur.
 */

static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
        static u8 udma_timing[6][2] = {
                { 0x60, 0x03 }, /* 33 MHz clock */
                { 0x40, 0x02 },
                { 0x20, 0x01 },
                { 0x40, 0x02 }, /* 66 MHz clock */
                { 0x20, 0x01 },
                { 0x20, 0x01 }
        };
        static u8 mdma_timing[3][2] = {
                { 0xe0, 0x0f },
                { 0x60, 0x04 },
                { 0x60, 0x03 },
        };
        u8 r_bp, r_cp;

        pci_read_config_byte(pdev, port + 1, &r_bp);
        pci_read_config_byte(pdev, port + 2, &r_cp);

        r_bp &= ~0xE0;
        r_cp &= ~0x0F;

        if (adev->dma_mode >= XFER_UDMA_0) {
                int speed = adev->dma_mode - XFER_UDMA_0;
                r_bp |= udma_timing[speed][0];
                r_cp |= udma_timing[speed][1];
        } else {
                int speed = adev->dma_mode - XFER_MW_DMA_0;
                r_bp |= mdma_timing[speed][0];
                r_cp |= mdma_timing[speed][1];
        }
        pci_write_config_byte(pdev, port + 1, r_bp);
        pci_write_config_byte(pdev, port + 2, r_cp);
}

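/*
 * Note: the DMA timing bits live in the same registers as the PIO
 * timings, so pdc2026x_bmdma_start() below reloads them before each
 * transfer in case a reset (or a PIO reconfiguration) has overwritten
 * them.
 */
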
/**
 *	pdc2026x_bmdma_start	-	DMA engine begin
 *	@qc: ATA command
 *
 *	In UDMA3 or higher we have to clock switch for the duration of the
 *	DMA transfer sequence.
 */

static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *adev = qc->dev;
        struct ata_taskfile *tf = &qc->tf;
        int sel66 = ap->port_no ? 0x08 : 0x02;

        /* The clock bits are in the same register for both channels */
        void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
        void __iomem *clock = master + 0x11;
        void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
        u32 len;

        /* Check we keep host level locking here */
        if (adev->dma_mode > XFER_UDMA_2)
                iowrite8(ioread8(clock) | sel66, clock);
        else
                iowrite8(ioread8(clock) & ~sel66, clock);

        /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
           and move to qc_issue ? */
        pdc202xx_set_dmamode(ap, qc->dev);

        /* Cases the state machine will not complete correctly without help */
        if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) {
                len = qc->nbytes / 2;

                if (tf->flags & ATA_TFLAG_WRITE)
                        len |= 0x06000000;
                else
                        len |= 0x05000000;

                iowrite32(len, atapi_reg);
        }

        /* Activate DMA */
        ata_bmdma_start(qc);
}

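/*
 * Note: the per-channel register at bmdma_addr + 0x20/0x24 holds the
 * expected transfer length in words plus direction flags in the top
 * byte; bmdma_start above programs it for LBA48/ATAPI DMA and
 * bmdma_stop below clears it, since the 2026x state machine will not
 * terminate those transfers correctly otherwise.
 */
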
/**
 *	pdc2026x_bmdma_stop	-	DMA engine stop
 *	@qc: ATA command
 *
 *	After a DMA completes we need to put the clock back to 33MHz for
 *	PIO timings.
 */

static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *adev = qc->dev;
        struct ata_taskfile *tf = &qc->tf;
        int sel66 = ap->port_no ? 0x08 : 0x02;

        /* The clock bits are in the same register for both channels */
        void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
        void __iomem *clock = master + 0x11;
        void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

        /* Cases the state machine will not complete correctly */
        if (tf->protocol == ATA_PROT_ATAPI_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
                iowrite32(0, atapi_reg);
                iowrite8(ioread8(clock) & ~sel66, clock);
        }

        /* Check we keep host level locking here */
        /* Flip back to 33MHz for PIO */
        if (adev->dma_mode > XFER_UDMA_2)
                iowrite8(ioread8(clock) & ~sel66, clock);

        ata_bmdma_stop(qc);
}

/**
 *	pdc2026x_dev_config	-	device setup hook
 *	@adev: newly found device
 *
 *	Perform chip specific early setup. We need to lock the transfer
 *	sizes to 8 bits to avoid confusing the state engine on the 2026x
 *	cards.
 */

static void pdc2026x_dev_config(struct ata_device *adev)
{
        adev->max_sectors = 256;
}

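/*
 * Note: 256 sectors is the largest count an 8-bit sector count register
 * can express (0 encodes 256), which keeps each request within what the
 * 2026x state machine copes with.
 */
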
static struct scsi_host_template pdc202xx_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct ata_port_operations pdc2024x_port_ops = {
        .port_disable   = ata_port_disable,
        .set_piomode    = pdc202xx_set_piomode,
        .set_dmamode    = pdc202xx_set_dmamode,
        .mode_filter    = ata_pci_default_filter,
        .tf_load        = ata_tf_load,
        .tf_read        = ata_tf_read,
        .check_status   = ata_check_status,
        .exec_command   = ata_exec_command,
        .dev_select     = ata_std_dev_select,

        .freeze         = ata_bmdma_freeze,
        .thaw           = ata_bmdma_thaw,
        .error_handler  = ata_bmdma_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .cable_detect   = ata_cable_40wire,

        .bmdma_setup    = ata_bmdma_setup,
        .bmdma_start    = ata_bmdma_start,
        .bmdma_stop     = ata_bmdma_stop,
        .bmdma_status   = ata_bmdma_status,

        .qc_prep        = ata_qc_prep,
        .qc_issue       = ata_qc_issue_prot,
        .data_xfer      = ata_data_xfer,

        .irq_handler    = ata_interrupt,
        .irq_clear      = ata_bmdma_irq_clear,
        .irq_on         = ata_irq_on,
        .irq_ack        = ata_irq_ack,

        .port_start     = ata_port_start,
};

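/*
 * The 2026x operations below differ from the 2024x ones only in the
 * cable detect, dev_config and bmdma start/stop hooks, which handle the
 * 80-wire cable sense and the 66MHz clock switching.
 */
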
static struct ata_port_operations pdc2026x_port_ops = {
        .port_disable   = ata_port_disable,
        .set_piomode    = pdc202xx_set_piomode,
        .set_dmamode    = pdc202xx_set_dmamode,
        .mode_filter    = ata_pci_default_filter,
        .tf_load        = ata_tf_load,
        .tf_read        = ata_tf_read,
        .check_status   = ata_check_status,
        .exec_command   = ata_exec_command,
        .dev_select     = ata_std_dev_select,
        .dev_config     = pdc2026x_dev_config,

        .freeze         = ata_bmdma_freeze,
        .thaw           = ata_bmdma_thaw,
        .error_handler  = ata_bmdma_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .cable_detect   = pdc2026x_cable_detect,

        .bmdma_setup    = ata_bmdma_setup,
        .bmdma_start    = pdc2026x_bmdma_start,
        .bmdma_stop     = pdc2026x_bmdma_stop,
        .bmdma_status   = ata_bmdma_status,

        .qc_prep        = ata_qc_prep,
        .qc_issue       = ata_qc_issue_prot,
        .data_xfer      = ata_data_xfer,

        .irq_handler    = ata_interrupt,
        .irq_clear      = ata_bmdma_irq_clear,
        .irq_on         = ata_irq_on,
        .irq_ack        = ata_irq_ack,

        .port_start     = ata_port_start,
};

static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
        static struct ata_port_info info[3] = {
                {
                        .sht = &pdc202xx_sht,
                        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
                        .pio_mask = 0x1f,
                        .mwdma_mask = 0x07,
                        .udma_mask = ATA_UDMA2,
                        .port_ops = &pdc2024x_port_ops
                },
                {
                        .sht = &pdc202xx_sht,
                        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
                        .pio_mask = 0x1f,
                        .mwdma_mask = 0x07,
                        .udma_mask = ATA_UDMA4,
                        .port_ops = &pdc2026x_port_ops
                },
                {
                        .sht = &pdc202xx_sht,
                        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
                        .pio_mask = 0x1f,
                        .mwdma_mask = 0x07,
                        .udma_mask = ATA_UDMA5,
                        .port_ops = &pdc2026x_port_ops
                }
        };
        static struct ata_port_info *port_info[2];

        port_info[0] = port_info[1] = &info[id->driver_data];

        if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
                struct pci_dev *bridge = dev->bus->self;
                /* Don't grab anything behind a Promise I2O RAID */
                if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
                        if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
                                return -ENODEV;
                        if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
                                return -ENODEV;
                }
        }
        return ata_pci_init_one(dev, port_info, 2);
}

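/*
 * The driver_data value of each ID below indexes info[] above:
 * 0 = PDC20246 (UDMA33), 1 = PDC20262/20263 (UDMA66),
 * 2 = PDC20265/20267 (UDMA100).
 */
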
static const struct pci_device_id pdc202xx[] = {
        { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
        { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
        { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
        { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
        { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },

        { },
};

static struct pci_driver pdc202xx_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = pdc202xx,
        .probe          = pdc202xx_init_one,
        .remove         = ata_pci_remove_one,
#ifdef CONFIG_PM
        .suspend        = ata_pci_device_suspend,
        .resume         = ata_pci_device_resume,
#endif
};

static int __init pdc202xx_init(void)
{
        return pci_register_driver(&pdc202xx_pci_driver);
}

static void __exit pdc202xx_exit(void)
{
        pci_unregister_driver(&pdc202xx_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc202xx);
MODULE_VERSION(DRV_VERSION);

module_init(pdc202xx_init);
module_exit(pdc202xx_exit);