/*
 * pata_amd.c	- AMD PATA for new ATA layer
 *		  (C) 2005-2006 Red Hat Inc
 *		  Alan Cox <alan@redhat.com>
 *
 *  Based on pata-sil680. Errata information is taken from data sheets
 *  and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 *  claimed by sata-nv.c.
 *
 *  TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 *  Documentation publicly available.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.2.8"
/**
 *	timing_setup		-	shared timing computation and load
 *	@ap: ATA port being set up
 *	@adev: drive being configured
 *	@offset: port offset
 *	@speed: target speed
 *	@clock: clock multiplier (number of times 33MHz for this part)
 *
 *	Perform the actual timing set up for Nvidia or AMD PATA devices.
 *	The actual devices vary so they all call into this helper function
 *	providing the clock multiplier and offset (because AMD and Nvidia put
 *	the ports at different locations).
 */
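/*
 * A sketch of the timing register block as inferred from the code below
 * rather than from any datasheet: every byte is indexed by (3 - dn), so
 * the primary master uses the highest byte of each group. Relative to the
 * per-vendor @offset (0x40 for AMD, 0x50 for Nvidia):
 *
 *	+0x08..0x0B	drive active/recovery timing, one byte per drive
 *	+0x0C		address setup, two bits per drive
 *	+0x0E..0x0F	8bit (command) timing, one byte per channel
 *	+0x10..0x13	UDMA timing and enable, one byte per drive
 */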
static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
	/* Maps a UDMA cycle length in clocks to the chip's UDMA timing code */
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);
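	/*
	 * Worked example: amd_clock is 33333 kHz, so T = 1000000000 / 33333
	 * = 30000, one 33MHz clock in the units ata_timing_compute() works
	 * in. UT is the UDMA clock: the clamp limits the divisor to 1..2,
	 * giving UT = T on 33MHz parts (clock == 1) and T / 2 on 66MHz and
	 * faster parts.
	 */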
	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative: the 8bit command timing
		   register below is shared by both drives on the channel */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
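	/*
	 * at.udma now holds a UDMA cycle count in UT clocks. UDMA5/6 cycles
	 * are too short to express that way, so the forced values 1 and 15
	 * appear to exist purely to index the dedicated amd_cyc2udma
	 * encodings (6 and 7) used by the UDMA100/UDMA133 cases below.
	 */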
	/*
	 *	Now do the setup work
	 */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;
	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;
	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;
	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;
	default:
		return;
	}

	/* UDMA timing */
	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
/**
 *	amd_pre_reset		-	cable detection
 *	@ap: ATA port
 *
 *	Perform cable detection. The BIOS stores this in PCI config
 *	space for us.
 */
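/*
 * Judging by the bitmask table below, the BIOS reports the cable type in
 * the low nibble of config register 0x42: bits 0-1 for the primary channel
 * and bits 2-3 for the secondary, non-zero meaning an 80-wire cable.
 */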
static int amd_pre_reset(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		ap->cbl = ATA_CBL_PATA80;
	else
		ap->cbl = ATA_CBL_PATA40;

	return ata_std_prereset(ap);
}
static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}
static int amd_early_pre_reset(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	/* No host side cable detection */
	ap->cbl = ATA_CBL_PATA80;
	return ata_std_prereset(ap);
}
static void amd_early_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_early_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}
/**
 *	amd33_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */
static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}
/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */
static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}
/**
 *	nv_pre_reset		-	cable detection
 *	@ap: ATA port
 *
 *	Perform cable detection. The BIOS stores this in PCI config
 *	space for us.
 */
static int nv_pre_reset(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		ap->cbl = ATA_CBL_PATA80;
	else
		ap->cbl = ATA_CBL_PATA40;

	/* We now have to double check because the Nvidia boxes BIOS
	   doesn't always set the cable bits but does set mode bits */
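	/* The 0xC4/0xC400 patterns appear to check, for either drive on the
	   channel, that UDMA is enabled (bits 7:6) with one of the faster
	   amd_cyc2udma encodings (bit 2 set, i.e. above UDMA2), which is
	   only valid over an 80-wire cable. */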
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		ap->cbl = ATA_CBL_PATA80;

	return ata_std_prereset(ap);
}
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}
/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the Nvidia registers for PIO mode.
 */
static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}
/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */
static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}
static struct scsi_host_template amd_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.resume			= ata_scsi_device_resume,
	.suspend		= ata_scsi_device_suspend,
};
static struct ata_port_operations amd33_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd33_set_piomode,
	.set_dmamode	= amd33_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_early_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};
static struct ata_port_operations amd66_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd66_set_piomode,
	.set_dmamode	= amd66_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_early_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};
static struct ata_port_operations amd100_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd100_set_piomode,
	.set_dmamode	= amd100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};
static struct ata_port_operations amd133_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd133_set_piomode,
	.set_dmamode	= amd133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};
static struct ata_port_operations nv100_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= nv100_set_piomode,
	.set_dmamode	= nv100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};
static struct ata_port_operations nv133_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= nv133_set_piomode,
	.set_dmamode	= nv133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};
static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x1f,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x1f,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
	};
	static struct ata_port_info *port_info[2];
	static int printed_version;
	int type = id->driver_data;
	u8 rev;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	pci_read_config_byte(pdev, 0x41, &fifo);
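	/* Register 0x41 appears to hold the per-drive prefetch/post-write
	   FIFO enables in its high nibble; the AMD7411 FIFO erratum means
	   it has to stay off there while everything else gets it enabled. */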
	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && rev > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	if (type < 3)
		ata_pci_clear_simplex(pdev);
	/* And fire it up */
	port_info[0] = port_info[1] = &info[type];
	return ata_pci_init_one(pdev, port_info, 2);
}
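/*
 * A suspend/resume cycle can leave the controller with BIOS or reset
 * defaults in config space, so the resume path below reapplies the FIFO
 * and simplex fixups from probe before the generic libata PCI resume runs.
 */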
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7411),		3 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_OPUS_7441),		4 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_8111_IDE),		5 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_IDE),	7 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },

	{ },
};
static struct pci_driver amd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= amd,
	.probe		= amd_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= amd_reinit_one,
};
static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);