/*
 * pata_amd.c	- AMD PATA for new ATA layer
 *		  (C) 2005-2006 Red Hat Inc
 *		  Alan Cox <alan@redhat.com>
 *
 *  Based on pata-sil680. Errata information is taken from data sheets
 *  and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 *  claimed by sata-nv.c.
 *
 *  TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 *	Documentation publicly available.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.2.7"
/**
 *	timing_setup		-	shared timing computation and load
 *	@ap: ATA port being set up
 *	@adev: drive being configured
 *	@offset: port offset
 *	@speed: target speed
 *	@clock: clock multiplier (number of times 33MHz for this part)
 *
 *	Perform the actual timing set up for Nvidia or AMD PATA devices.
 *	The actual devices vary so they all call into this helper function
 *	providing the clock multiplier and offset (because AMD and Nvidia put
 *	the ports at different locations).
 */
static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/* Now do the setup work */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;
	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;
	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;
	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;
	default:
		return;
	}

	/* UDMA timing */
	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
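
/*
 * Layout of the timing registers programmed above, as implied by the
 * offsets used in timing_setup() (the base offset is per chip: 0x40 on
 * the AMD parts, 0x50 on the Nvidia parts):
 *
 *	base + 0x08-0x0B: active/recover counts, one byte per drive
 *	base + 0x0C:      address setup, two bits per drive
 *	base + 0x0E-0x0F: 8bit command timing, one byte per channel
 *	base + 0x10-0x13: UDMA mode/enable, one byte per drive
 *
 * T is derived from the 33333kHz reference clock and UT is T for a 1x
 * clock part or T/2 for 2x and faster parts; the ata_timing helpers do
 * the per mode cycle computation from those values.
 */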
/**
 *	amd_pre_reset		-	cable detection
 *	@ap: ATA port
 *
 *	Perform cable detection. The BIOS stores this in PCI config
 *	space for us.
 */

static int amd_pre_reset(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0xC0};
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		ap->cbl = ATA_CBL_PATA80;
	else
		ap->cbl = ATA_CBL_PATA40;
	return ata_std_prereset(ap);
}
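
/*
 * In amd_pre_reset() the channel enable bits live in config register
 * 0x40 (bit 1 = primary, bit 0 = secondary) and the cable report in
 * 0x42, two bits per port: mask 0x03 for port 0 and 0xC0 for port 1.
 * A set bit appears to mean the BIOS saw an 80 wire cable on that
 * channel.
 */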
static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}
static int amd_early_pre_reset(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	/* No host side cable detection */
	ap->cbl = ATA_CBL_PATA80;
	return ata_std_prereset(ap);
}
static void amd_early_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_early_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}
/**
 *	amd33_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}
/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}
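
/*
 * The trailing argument to timing_setup() in the wrappers above and in
 * the nv100/nv133 wrappers below is the UDMA clock multiplier: 1 for
 * UDMA33, 2 for UDMA66, 3 for UDMA100 and 4 for UDMA133 class
 * controllers. Only that multiplier and the config space base offset
 * differ between the chip generations.
 */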
/**
 *	nv_pre_reset		-	cable detection
 *	@ap: ATA port
 *
 *	Perform cable detection. The BIOS stores this in PCI config
 *	space for us.
 */

static int nv_pre_reset(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0xC0};
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		ap->cbl = ATA_CBL_PATA80;
	else
		ap->cbl = ATA_CBL_PATA40;

	/* We now have to double check because the Nvidia boxes BIOS
	   doesn't always set the cable bits but does set mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		ap->cbl = ATA_CBL_PATA80;
	return ata_std_prereset(ap);
}
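
/*
 * The extra check in nv_pre_reset() reads the per port UDMA mode bytes
 * at 0x60/0x62: if the BIOS already programmed a drive for what is
 * presumably an 80 wire mode (the 0xC4/0xC400 patterns), that is
 * trusted over the cable detect bits, which some Nforce BIOSes leave
 * unset.
 */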
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}
/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}
/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}
static struct scsi_host_template amd_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
	.resume = ata_scsi_device_resume,
	.suspend = ata_scsi_device_suspend,
};
static struct ata_port_operations amd33_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_early_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};
static struct ata_port_operations amd66_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_early_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};
static struct ata_port_operations amd100_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};
static struct ata_port_operations amd133_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};
static struct ata_port_operations nv100_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};
static struct ata_port_operations nv133_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};
static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x1f,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x1f,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	static struct ata_port_info *port_info[2];
	static int printed_version;
	int type = id->driver_data;
	u8 rev;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && rev > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	/* Early chips (types 0-2) get the simplex bit cleared;
	   amd_reinit_one() repeats this on resume */
	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	port_info[0] = port_info[1] = &info[type];
	return ata_pci_init_one(pdev, port_info, 2);
}
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
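
/*
 * Resume path: the FIFO configuration and the simplex clear are chip
 * state that may not survive a suspend, so amd_reinit_one() repeats
 * the fixups amd_init_one() applied at probe time before handing off
 * to ata_pci_device_resume().
 */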
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};
static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
};
static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);