2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #include <linux/highmem.h>
33 #include <linux/spinlock.h>
34 #include <linux/blkdev.h>
35 #include <linux/delay.h>
36 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/completion.h>
39 #include <linux/suspend.h>
40 #include <linux/workqueue.h>
41 #include <scsi/scsi.h>
43 #include "scsi_priv.h"
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
47 #include <asm/semaphore.h>
48 #include <asm/byteorder.h>
52 static unsigned int ata_busy_sleep (struct ata_port *ap,
53 unsigned long tmout_pat,
55 static void ata_set_mode(struct ata_port *ap);
56 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
57 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
58 static int fgb(u32 bitmap);
59 static int ata_choose_xfer_mode(struct ata_port *ap,
61 unsigned int *xfer_shift_out);
62 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
63 static void __ata_qc_complete(struct ata_queued_cmd *qc);
65 static unsigned int ata_unique_id = 1;
66 static struct workqueue_struct *ata_wq;
68 MODULE_AUTHOR("Jeff Garzik");
69 MODULE_DESCRIPTION("Library module for ATA devices");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_VERSION);
74 * ata_tf_load - send taskfile registers to host controller
75 * @ap: Port to which output is sent
76 * @tf: ATA taskfile register set
78 * Outputs ATA taskfile to standard ATA host controller.
81 * Inherited from caller.
84 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
86 struct ata_ioports *ioaddr = &ap->ioaddr;
87 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
89 if (tf->ctl != ap->last_ctl) {
90 outb(tf->ctl, ioaddr->ctl_addr);
91 ap->last_ctl = tf->ctl;
95 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
96 outb(tf->hob_feature, ioaddr->feature_addr);
97 outb(tf->hob_nsect, ioaddr->nsect_addr);
98 outb(tf->hob_lbal, ioaddr->lbal_addr);
99 outb(tf->hob_lbam, ioaddr->lbam_addr);
100 outb(tf->hob_lbah, ioaddr->lbah_addr);
101 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
110 outb(tf->feature, ioaddr->feature_addr);
111 outb(tf->nsect, ioaddr->nsect_addr);
112 outb(tf->lbal, ioaddr->lbal_addr);
113 outb(tf->lbam, ioaddr->lbam_addr);
114 outb(tf->lbah, ioaddr->lbah_addr);
115 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
123 if (tf->flags & ATA_TFLAG_DEVICE) {
124 outb(tf->device, ioaddr->device_addr);
125 VPRINTK("device 0x%X\n", tf->device);
132 * ata_tf_load_mmio - send taskfile registers to host controller
133 * @ap: Port to which output is sent
134 * @tf: ATA taskfile register set
136 * Outputs ATA taskfile to standard ATA host controller using MMIO.
139 * Inherited from caller.
142 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
144 struct ata_ioports *ioaddr = &ap->ioaddr;
145 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
147 if (tf->ctl != ap->last_ctl) {
148 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
149 ap->last_ctl = tf->ctl;
153 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
154 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
155 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
156 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
157 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
158 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
159 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
168 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
169 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
170 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
171 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
172 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
173 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
181 if (tf->flags & ATA_TFLAG_DEVICE) {
182 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
183 VPRINTK("device 0x%X\n", tf->device);
191 * ata_tf_load - send taskfile registers to host controller
192 * @ap: Port to which output is sent
193 * @tf: ATA taskfile register set
195 * Outputs ATA taskfile to standard ATA host controller using MMIO
196 * or PIO as indicated by the ATA_FLAG_MMIO flag.
197 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
198 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
199 * hob_lbal, hob_lbam, and hob_lbah.
201 * This function waits for idle (!BUSY and !DRQ) after writing
202 * registers. If the control register has a new value, this
203 * function also waits for idle after writing control and before
204 * writing the remaining registers.
206 * May be used as the tf_load() entry in ata_port_operations.
209 * Inherited from caller.
211 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
213 if (ap->flags & ATA_FLAG_MMIO)
214 ata_tf_load_mmio(ap, tf);
216 ata_tf_load_pio(ap, tf);
220 * ata_exec_command_pio - issue ATA command to host controller
221 * @ap: port to which command is being issued
222 * @tf: ATA taskfile register set
224 * Issues PIO write to ATA command register, with proper
225 * synchronization with interrupt handler / other threads.
228 * spin_lock_irqsave(host_set lock)
231 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
233 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
235 outb(tf->command, ap->ioaddr.command_addr);
241 * ata_exec_command_mmio - issue ATA command to host controller
242 * @ap: port to which command is being issued
243 * @tf: ATA taskfile register set
245 * Issues MMIO write to ATA command register, with proper
246 * synchronization with interrupt handler / other threads.
249 * spin_lock_irqsave(host_set lock)
252 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
254 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
256 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
262 * ata_exec_command - issue ATA command to host controller
263 * @ap: port to which command is being issued
264 * @tf: ATA taskfile register set
266 * Issues PIO/MMIO write to ATA command register, with proper
267 * synchronization with interrupt handler / other threads.
270 * spin_lock_irqsave(host_set lock)
272 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
274 if (ap->flags & ATA_FLAG_MMIO)
275 ata_exec_command_mmio(ap, tf);
277 ata_exec_command_pio(ap, tf);
281 * ata_exec - issue ATA command to host controller
282 * @ap: port to which command is being issued
283 * @tf: ATA taskfile register set
285 * Issues PIO/MMIO write to ATA command register, with proper
286 * synchronization with interrupt handler / other threads.
289 * Obtains host_set lock.
292 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
296 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
297 spin_lock_irqsave(&ap->host_set->lock, flags);
298 ap->ops->exec_command(ap, tf);
299 spin_unlock_irqrestore(&ap->host_set->lock, flags);
303 * ata_tf_to_host - issue ATA taskfile to host controller
304 * @ap: port to which command is being issued
305 * @tf: ATA taskfile register set
307 * Issues ATA taskfile register set to ATA host controller,
308 * with proper synchronization with interrupt handler and
312 * Obtains host_set lock.
315 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
317 ap->ops->tf_load(ap, tf);
323 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
324 * @ap: port to which command is being issued
325 * @tf: ATA taskfile register set
327 * Issues ATA taskfile register set to ATA host controller,
328 * with proper synchronization with interrupt handler and
332 * spin_lock_irqsave(host_set lock)
335 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
337 ap->ops->tf_load(ap, tf);
338 ap->ops->exec_command(ap, tf);
342 * ata_tf_read_pio - input device's ATA taskfile shadow registers
343 * @ap: Port from which input is read
344 * @tf: ATA taskfile register set for storing input
346 * Reads ATA taskfile registers for currently-selected device
350 * Inherited from caller.
353 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
355 struct ata_ioports *ioaddr = &ap->ioaddr;
357 tf->nsect = inb(ioaddr->nsect_addr);
358 tf->lbal = inb(ioaddr->lbal_addr);
359 tf->lbam = inb(ioaddr->lbam_addr);
360 tf->lbah = inb(ioaddr->lbah_addr);
361 tf->device = inb(ioaddr->device_addr);
363 if (tf->flags & ATA_TFLAG_LBA48) {
364 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
365 tf->hob_feature = inb(ioaddr->error_addr);
366 tf->hob_nsect = inb(ioaddr->nsect_addr);
367 tf->hob_lbal = inb(ioaddr->lbal_addr);
368 tf->hob_lbam = inb(ioaddr->lbam_addr);
369 tf->hob_lbah = inb(ioaddr->lbah_addr);
374 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
375 * @ap: Port from which input is read
376 * @tf: ATA taskfile register set for storing input
378 * Reads ATA taskfile registers for currently-selected device
382 * Inherited from caller.
385 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
387 struct ata_ioports *ioaddr = &ap->ioaddr;
389 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
390 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
391 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
392 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
393 tf->device = readb((void __iomem *)ioaddr->device_addr);
395 if (tf->flags & ATA_TFLAG_LBA48) {
396 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
397 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
398 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
399 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
400 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
401 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
407 * ata_tf_read - input device's ATA taskfile shadow registers
408 * @ap: Port from which input is read
409 * @tf: ATA taskfile register set for storing input
411 * Reads ATA taskfile registers for currently-selected device
414 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
415 * is set, also reads the hob registers.
417 * May be used as the tf_read() entry in ata_port_operations.
420 * Inherited from caller.
422 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
424 if (ap->flags & ATA_FLAG_MMIO)
425 ata_tf_read_mmio(ap, tf);
427 ata_tf_read_pio(ap, tf);
431 * ata_check_status_pio - Read device status reg & clear interrupt
432 * @ap: port where the device is
434 * Reads ATA taskfile status register for currently-selected device
435 * and return its value. This also clears pending interrupts
439 * Inherited from caller.
441 static u8 ata_check_status_pio(struct ata_port *ap)
443 return inb(ap->ioaddr.status_addr);
447 * ata_check_status_mmio - Read device status reg & clear interrupt
448 * @ap: port where the device is
450 * Reads ATA taskfile status register for currently-selected device
451 * via MMIO and return its value. This also clears pending interrupts
455 * Inherited from caller.
457 static u8 ata_check_status_mmio(struct ata_port *ap)
459 return readb((void __iomem *) ap->ioaddr.status_addr);
464 * ata_check_status - Read device status reg & clear interrupt
465 * @ap: port where the device is
467 * Reads ATA taskfile status register for currently-selected device
468 * and return its value. This also clears pending interrupts
471 * May be used as the check_status() entry in ata_port_operations.
474 * Inherited from caller.
476 u8 ata_check_status(struct ata_port *ap)
478 if (ap->flags & ATA_FLAG_MMIO)
479 return ata_check_status_mmio(ap);
480 return ata_check_status_pio(ap);
485 * ata_altstatus - Read device alternate status reg
486 * @ap: port where the device is
488 * Reads ATA taskfile alternate status register for
489 * currently-selected device and return its value.
491 * Note: may NOT be used as the check_altstatus() entry in
492 * ata_port_operations.
495 * Inherited from caller.
497 u8 ata_altstatus(struct ata_port *ap)
499 if (ap->ops->check_altstatus)
500 return ap->ops->check_altstatus(ap);
502 if (ap->flags & ATA_FLAG_MMIO)
503 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
504 return inb(ap->ioaddr.altstatus_addr);
509 * ata_chk_err - Read device error reg
510 * @ap: port where the device is
512 * Reads ATA taskfile error register for
513 * currently-selected device and return its value.
515 * Note: may NOT be used as the check_err() entry in
516 * ata_port_operations.
519 * Inherited from caller.
521 u8 ata_chk_err(struct ata_port *ap)
523 if (ap->ops->check_err)
524 return ap->ops->check_err(ap);
526 if (ap->flags & ATA_FLAG_MMIO) {
527 return readb((void __iomem *) ap->ioaddr.error_addr);
529 return inb(ap->ioaddr.error_addr);
533 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
534 * @tf: Taskfile to convert
535 * @fis: Buffer into which data will output
536 * @pmp: Port multiplier port
538 * Converts a standard ATA taskfile to a Serial ATA
539 * FIS structure (Register - Host to Device).
542 * Inherited from caller.
545 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
547 fis[0] = 0x27; /* Register - Host to Device FIS */
548 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
549 bit 7 indicates Command FIS */
550 fis[2] = tf->command;
551 fis[3] = tf->feature;
558 fis[8] = tf->hob_lbal;
559 fis[9] = tf->hob_lbam;
560 fis[10] = tf->hob_lbah;
561 fis[11] = tf->hob_feature;
564 fis[13] = tf->hob_nsect;
575 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
576 * @fis: Buffer from which data will be input
577 * @tf: Taskfile to output
579 * Converts a standard ATA taskfile to a Serial ATA
580 * FIS structure (Register - Host to Device).
583 * Inherited from caller.
586 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
588 tf->command = fis[2]; /* status */
589 tf->feature = fis[3]; /* error */
596 tf->hob_lbal = fis[8];
597 tf->hob_lbam = fis[9];
598 tf->hob_lbah = fis[10];
601 tf->hob_nsect = fis[13];
605 * ata_prot_to_cmd - determine which read/write opcodes to use
606 * @protocol: ATA_PROT_xxx taskfile protocol
607 * @lba48: true is lba48 is present
609 * Given necessary input, determine which read/write commands
610 * to use to transfer data.
615 static int ata_prot_to_cmd(int protocol, int lba48)
617 int rcmd = 0, wcmd = 0;
622 rcmd = ATA_CMD_PIO_READ_EXT;
623 wcmd = ATA_CMD_PIO_WRITE_EXT;
625 rcmd = ATA_CMD_PIO_READ;
626 wcmd = ATA_CMD_PIO_WRITE;
632 rcmd = ATA_CMD_READ_EXT;
633 wcmd = ATA_CMD_WRITE_EXT;
636 wcmd = ATA_CMD_WRITE;
644 return rcmd | (wcmd << 8);
648 * ata_dev_set_protocol - set taskfile protocol and r/w commands
649 * @dev: device to examine and configure
651 * Examine the device configuration, after we have
652 * read the identify-device page and configured the
653 * data transfer mode. Set internal state related to
654 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
655 * and calculate the proper read/write commands to use.
660 static void ata_dev_set_protocol(struct ata_device *dev)
662 int pio = (dev->flags & ATA_DFLAG_PIO);
663 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
667 proto = dev->xfer_protocol = ATA_PROT_PIO;
669 proto = dev->xfer_protocol = ATA_PROT_DMA;
671 cmd = ata_prot_to_cmd(proto, lba48);
675 dev->read_cmd = cmd & 0xff;
676 dev->write_cmd = (cmd >> 8) & 0xff;
/* Human-readable names indexed by (xfer_mode - class base) + class shift:
 * 8 UDMA entries, 3 MWDMA entries, 5 PIO entries. */
static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};
699 * ata_udma_string - convert UDMA bit offset to string
700 * @mask: mask of bits supported; only highest bit counts.
702 * Determine string which represents the highest speed
703 * (highest bit in @udma_mask).
709 * Constant C string representing highest speed listed in
710 * @udma_mask, or the constant C string "<n/a>".
713 static const char *ata_mode_string(unsigned int mask)
717 for (i = 7; i >= 0; i--)
720 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
723 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
730 return xfer_mode_str[i];
734 * ata_pio_devchk - PATA device presence detection
735 * @ap: ATA channel to examine
736 * @device: Device to examine (starting at zero)
738 * This technique was originally described in
739 * Hale Landis's ATADRVR (www.ata-atapi.com), and
740 * later found its way into the ATA/ATAPI spec.
742 * Write a pattern to the ATA shadow registers,
743 * and if a device is present, it will respond by
744 * correctly storing and echoing back the
745 * ATA shadow register contents.
751 static unsigned int ata_pio_devchk(struct ata_port *ap,
754 struct ata_ioports *ioaddr = &ap->ioaddr;
757 ap->ops->dev_select(ap, device);
759 outb(0x55, ioaddr->nsect_addr);
760 outb(0xaa, ioaddr->lbal_addr);
762 outb(0xaa, ioaddr->nsect_addr);
763 outb(0x55, ioaddr->lbal_addr);
765 outb(0x55, ioaddr->nsect_addr);
766 outb(0xaa, ioaddr->lbal_addr);
768 nsect = inb(ioaddr->nsect_addr);
769 lbal = inb(ioaddr->lbal_addr);
771 if ((nsect == 0x55) && (lbal == 0xaa))
772 return 1; /* we found a device */
774 return 0; /* nothing found */
778 * ata_mmio_devchk - PATA device presence detection
779 * @ap: ATA channel to examine
780 * @device: Device to examine (starting at zero)
782 * This technique was originally described in
783 * Hale Landis's ATADRVR (www.ata-atapi.com), and
784 * later found its way into the ATA/ATAPI spec.
786 * Write a pattern to the ATA shadow registers,
787 * and if a device is present, it will respond by
788 * correctly storing and echoing back the
789 * ATA shadow register contents.
795 static unsigned int ata_mmio_devchk(struct ata_port *ap,
798 struct ata_ioports *ioaddr = &ap->ioaddr;
801 ap->ops->dev_select(ap, device);
803 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
804 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
806 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
807 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
809 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
810 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
812 nsect = readb((void __iomem *) ioaddr->nsect_addr);
813 lbal = readb((void __iomem *) ioaddr->lbal_addr);
815 if ((nsect == 0x55) && (lbal == 0xaa))
816 return 1; /* we found a device */
818 return 0; /* nothing found */
822 * ata_devchk - PATA device presence detection
823 * @ap: ATA channel to examine
824 * @device: Device to examine (starting at zero)
826 * Dispatch ATA device presence detection, depending
827 * on whether we are using PIO or MMIO to talk to the
828 * ATA shadow registers.
834 static unsigned int ata_devchk(struct ata_port *ap,
837 if (ap->flags & ATA_FLAG_MMIO)
838 return ata_mmio_devchk(ap, device);
839 return ata_pio_devchk(ap, device);
843 * ata_dev_classify - determine device type based on ATA-spec signature
844 * @tf: ATA taskfile register set for device to be identified
846 * Determine from taskfile register contents whether a device is
847 * ATA or ATAPI, as per "Signature and persistence" section
848 * of ATA/PI spec (volume 1, sect 5.14).
854 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
855 * the event of failure.
858 unsigned int ata_dev_classify(struct ata_taskfile *tf)
860 /* Apple's open source Darwin code hints that some devices only
861 * put a proper signature into the LBA mid/high registers,
862 * So, we only check those. It's sufficient for uniqueness.
865 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
866 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
867 DPRINTK("found ATA device by sig\n");
871 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
872 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
873 DPRINTK("found ATAPI device by sig\n");
874 return ATA_DEV_ATAPI;
877 DPRINTK("unknown device\n");
878 return ATA_DEV_UNKNOWN;
882 * ata_dev_try_classify - Parse returned ATA device signature
883 * @ap: ATA channel to examine
884 * @device: Device to examine (starting at zero)
886 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
887 * an ATA/ATAPI-defined set of values is placed in the ATA
888 * shadow registers, indicating the results of device detection
891 * Select the ATA device, and read the values from the ATA shadow
892 * registers. Then parse according to the Error register value,
893 * and the spec-defined values examined by ata_dev_classify().
899 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
901 struct ata_device *dev = &ap->device[device];
902 struct ata_taskfile tf;
906 ap->ops->dev_select(ap, device);
908 memset(&tf, 0, sizeof(tf));
910 err = ata_chk_err(ap);
911 ap->ops->tf_read(ap, &tf);
913 dev->class = ATA_DEV_NONE;
915 /* see if device passed diags */
918 else if ((device == 0) && (err == 0x81))
923 /* determine if device if ATA or ATAPI */
924 class = ata_dev_classify(&tf);
925 if (class == ATA_DEV_UNKNOWN)
927 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
936 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
937 * @id: IDENTIFY DEVICE results we will examine
938 * @s: string into which data is output
939 * @ofs: offset into identify device page
940 * @len: length of string to return. must be an even number.
942 * The strings in the IDENTIFY DEVICE page are broken up into
943 * 16-bit chunks. Run through the string, and output each
944 * 8-bit chunk linearly, regardless of platform.
950 void ata_dev_id_string(u16 *id, unsigned char *s,
951 unsigned int ofs, unsigned int len)
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
988 * ata_std_dev_select - Select device 0/1 on ATA bus
989 * @ap: ATA channel to manipulate
990 * @device: ATA device (numbered from zero) to select
992 * Use the method defined in the ATA specification to
993 * make either device 0, or device 1, active on the
994 * ATA channel. Works with both PIO and MMIO.
996 * May be used as the dev_select() entry in ata_port_operations.
1002 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1007 tmp = ATA_DEVICE_OBS;
1009 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1011 if (ap->flags & ATA_FLAG_MMIO) {
1012 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
1014 outb(tmp, ap->ioaddr.device_addr);
1016 ata_pause(ap); /* needed; also flushes, for mmio */
1020 * ata_dev_select - Select device 0/1 on ATA bus
1021 * @ap: ATA channel to manipulate
1022 * @device: ATA device (numbered from zero) to select
1023 * @wait: non-zero to wait for Status register BSY bit to clear
1024 * @can_sleep: non-zero if context allows sleeping
1026 * Use the method defined in the ATA specification to
1027 * make either device 0, or device 1, active on the
1030 * This is a high-level version of ata_std_dev_select(),
1031 * which additionally provides the services of inserting
1032 * the proper pauses and status polling, where needed.
1038 void ata_dev_select(struct ata_port *ap, unsigned int device,
1039 unsigned int wait, unsigned int can_sleep)
1041 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
1042 ap->id, device, wait);
1047 ap->ops->dev_select(ap, device);
1050 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1057 * ata_dump_id - IDENTIFY DEVICE info debugging output
1058 * @dev: Device whose IDENTIFY DEVICE page we will dump
1060 * Dump selected 16-bit words from a detected device's
1061 * IDENTIFY PAGE page.
/* Debug-only helper: the DPRINTKs below print raw 16-bit words from
 * dev->id[] (the IDENTIFY DEVICE data), grouped by word index ranges
 * starting at 49, 80, and 88. */
1067 static inline void ata_dump_id(struct ata_device *dev)
/* NOTE(review): the format-string continuation lines and the argument
 * lists for all three DPRINTK calls were dropped during extraction,
 * along with the braces -- restore from the canonical libata-core.c
 * before compiling. */
1069 DPRINTK("49==0x%04x "
1079 DPRINTK("80==0x%04x "
1089 DPRINTK("88==0x%04x "
1096 * ata_dev_identify - obtain IDENTIFY x DEVICE page
1097 * @ap: port on which device we wish to probe resides
1098 * @device: device bus address, starting at zero
1100 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1101 * command, and read back the 512-byte device information page.
1102 * The device information page is fed to us via the standard
1103 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1104 * using standard PIO-IN paths)
1106 * After reading the device information page, we use several
1107 * bits of information from it to initialize data structures
1108 * that will be used during the lifetime of the ata_device.
1109 * Other data from the info page is used to disqualify certain
1110 * older ATA devices we do not wish to support.
1113 * Inherited from caller. Some functions called by this function
1114 * obtain the host_set lock.
1117 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
/* NOTE(review): this function was extracted with many lines dropped
 * (declarations for rc/i/tmp/status, closing braces, goto targets
 * err_out/err_out_nosup).  The comments below annotate only what the
 * visible lines establish. */
1119 struct ata_device *dev = &ap->device[device];
1122 unsigned long xfer_modes;
1124 unsigned int using_edd;
1125 DECLARE_COMPLETION(wait);
1126 struct ata_queued_cmd *qc;
1127 unsigned long flags;
/* Bail out early if reset/classification did not detect a device. */
1130 if (!ata_dev_present(dev)) {
1131 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
/* presumably sets using_edd from the reset method flags -- the
 * assignment lines were dropped; verify against canonical source. */
1136 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1141 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1143 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1144 dev->class == ATA_DEV_NONE);
1146 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
/* Build a queued command that PIO-reads the 512-byte IDENTIFY page
 * into dev->id[], then issue it under the host_set lock and wait. */
1148 qc = ata_qc_new_init(ap, dev);
1151 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1152 qc->dma_dir = DMA_FROM_DEVICE;
1153 qc->tf.protocol = ATA_PROT_PIO;
1157 if (dev->class == ATA_DEV_ATA) {
1158 qc->tf.command = ATA_CMD_ID_ATA;
1159 DPRINTK("do ATA identify\n");
1161 qc->tf.command = ATA_CMD_ID_ATAPI;
1162 DPRINTK("do ATAPI identify\n");
1165 qc->waiting = &wait;
1166 qc->complete_fn = ata_qc_complete_noop;
1168 spin_lock_irqsave(&ap->host_set->lock, flags);
1169 rc = ata_qc_issue(qc);
1170 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1175 wait_for_completion(&wait);
/* Command finished; inspect status for an error. */
1177 status = ata_chk_status(ap);
1178 if (status & ATA_ERR) {
1180 * arg! EDD works for all test cases, but seems to return
1181 * the ATA signature for some ATAPI devices. Until the
1182 * reason for this is found and fixed, we fix up the mess
1183 * here. If IDENTIFY DEVICE returns command aborted
1184 * (as ATAPI devices do), then we issue an
1185 * IDENTIFY PACKET DEVICE.
1187 * ATA software reset (SRST, the default) does not appear
1188 * to have this problem.
1190 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1191 u8 err = ata_chk_err(ap);
1192 if (err & ATA_ABORTED) {
1193 dev->class = ATA_DEV_ATAPI;
/* IDENTIFY data arrives little-endian; fix byte order in place. */
1204 swap_buf_le16(dev->id, ATA_ID_WORDS);
1206 /* print device capabilities */
1207 printk(KERN_DEBUG "ata%u: dev %u cfg "
1208 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1209 ap->id, device, dev->id[49],
1210 dev->id[82], dev->id[83], dev->id[84],
1211 dev->id[85], dev->id[86], dev->id[87],
1215 * common ATA, ATAPI feature tests
1218 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1219 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
1220 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id)
1224 /* quick-n-dirty find max transfer mode; for printk only */
1225 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1227 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1229 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1230 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1235 /* ATA-specific feature tests */
1236 if (dev->class == ATA_DEV_ATA) {
1237 if (!ata_id_is_ata(dev->id)) /* sanity check */
/* Scan the major-version word for the highest set revision bit. */
1240 tmp = dev->id[ATA_ID_MAJOR_VER];
1241 for (i = 14; i >= 1; i--)
1245 /* we require at least ATA-3 */
1247 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
/* LBA48 devices report a 64-bit sector count at word 100;
 * older devices a 32-bit count at word 60. */
1251 if (ata_id_has_lba48(dev->id)) {
1252 dev->flags |= ATA_DFLAG_LBA48;
1253 dev->n_sectors = ata_id_u64(dev->id, 100);
1255 dev->n_sectors = ata_id_u32(dev->id, 60);
1258 ap->host->max_cmd_len = 16;
1260 /* print device info to dmesg */
1261 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1263 ata_mode_string(xfer_modes),
1264 (unsigned long long)dev->n_sectors,
1265 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1268 /* ATAPI-specific feature tests */
1270 if (ata_id_is_ata(dev->id)) /* sanity check */
/* ATAPI: validate CDB length (12..ATAPI_CDB_LEN) before accepting. */
1273 rc = atapi_cdb_len(dev->id);
1274 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1275 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1278 ap->cdb_len = (unsigned int) rc;
1279 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1281 /* print device info to dmesg */
1282 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1284 ata_mode_string(xfer_modes));
1287 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
/* error path: mark the device unsupported and leave it ignored */
1291 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1294 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1295 DPRINTK("EXIT, err\n");
1299 * ata_bus_probe - Reset and probe ATA bus
1302 * Master ATA bus probing function. Initiates a hardware-dependent
1303 * bus reset, then attempts to identify any devices found on
1307 * PCI/etc. bus probe sem.
1310 * Zero on success, non-zero on error.
1313 static int ata_bus_probe(struct ata_port *ap)
1315 unsigned int i, found = 0;
1317 ap->ops->phy_reset(ap);
1318 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1321 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1322 ata_dev_identify(ap, i);
1323 if (ata_dev_present(&ap->device[i])) {
1325 if (ap->ops->dev_config)
1326 ap->ops->dev_config(ap, &ap->device[i]);
1330 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1331 goto err_out_disable;
1334 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1335 goto err_out_disable;
1340 ap->ops->port_disable(ap);
1346 * ata_port_probe - Mark port as enabled
1347 * @ap: Port for which we indicate enablement
1349 * Modify @ap data structure such that the system
1350 * thinks that the entire port is enabled.
1352 * LOCKING: host_set lock, or some other form of
1356 void ata_port_probe(struct ata_port *ap)
1358 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1362 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1363 * @ap: SATA port associated with target SATA PHY.
1365 * This function issues commands to standard SATA Sxxx
1366 * PHY registers, to wake up the phy (and device), and
1367 * clear any reset condition.
1370 * PCI/etc. bus probe sem.
1373 void __sata_phy_reset(struct ata_port *ap)
1376 unsigned long timeout = jiffies + (HZ * 5);
1378 if (ap->flags & ATA_FLAG_SATA_RESET) {
1379 /* issue phy wake/reset */
1380 scr_write_flush(ap, SCR_CONTROL, 0x301);
1381 udelay(400); /* FIXME: a guess */
1383 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1385 /* wait for phy to become ready, if necessary */
1388 sstatus = scr_read(ap, SCR_STATUS);
1389 if ((sstatus & 0xf) != 1)
1391 } while (time_before(jiffies, timeout));
1393 /* TODO: phy layer with polling, timeouts, etc. */
1394 if (sata_dev_present(ap))
1397 sstatus = scr_read(ap, SCR_STATUS);
1398 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1400 ata_port_disable(ap);
1403 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1406 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1407 ata_port_disable(ap);
1411 ap->cbl = ATA_CBL_SATA;
1415 * sata_phy_reset - Reset SATA bus.
1416 * @ap: SATA port associated with target SATA PHY.
1418 * This function resets the SATA bus, and then probes
1419 * the bus for devices.
1422 * PCI/etc. bus probe sem.
1425 void sata_phy_reset(struct ata_port *ap)
1427 __sata_phy_reset(ap);
1428 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1434 * ata_port_disable - Disable port.
1435 * @ap: Port to be disabled.
1437 * Modify @ap data structure such that the system
1438 * thinks that the entire port is disabled, and should
1439 * never attempt to probe or communicate with devices
1442 * LOCKING: host_set lock, or some other form of
1446 void ata_port_disable(struct ata_port *ap)
1448 ap->device[0].class = ATA_DEV_NONE;
1449 ap->device[1].class = ATA_DEV_NONE;
1450 ap->flags |= ATA_FLAG_PORT_DISABLED;
/* map of each transfer-mode class (identified by its ATA_SHIFT_* value)
 * to the lowest SET FEATURES - XFER mode code in that class;
 * listed fastest class first
 */
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA, XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO, XFER_PIO_0 },
/* return the base XFER_* opcode for the transfer-mode class identified
 * by @shift, as listed in xfer_mode_classes[]
 */
static inline u8 base_from_shift(unsigned int shift)
for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
	if (xfer_mode_classes[i].shift == shift)
		return xfer_mode_classes[i].base;
/* program the chosen transfer mode on device @dev and log the result;
 * no-op when the device is absent or the port has been disabled
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))

/* remember that data transfers must use PIO for this device */
if (dev->xfer_shift == ATA_SHIFT_PIO)
	dev->flags |= ATA_DFLAG_PIO;

/* issue SET FEATURES - XFER MODE to the device itself */
ata_dev_set_xfermode(ap, dev);

/* translate (class base, mode code) into an xfer_mode_str[] index */
base = base_from_shift(dev->xfer_shift);
ofs = dev->xfer_mode - base;
idx = ofs + dev->xfer_shift;
WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
	idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

printk(KERN_INFO "ata%u: dev %u configured for %s\n",
       ap->id, dev->devno, xfer_mode_str[idx]);
/* choose the best PIO mode common to host and all present devices,
 * record it on each device, and program the controller's PIO timings
 */
static int ata_host_set_pio(struct ata_port *ap)
/* intersect host and device PIO capabilities */
mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);

/* convert the selected mode bit into a SET FEATURES XFER code */
base = base_from_shift(ATA_SHIFT_PIO);
xfer_mode = base + x;

DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
	(int)base, (int)xfer_mode, mask, x);

for (i = 0; i < ATA_MAX_DEVICES; i++) {
	struct ata_device *dev = &ap->device[i];
	if (ata_dev_present(dev)) {
		dev->pio_mode = xfer_mode;
		dev->xfer_mode = xfer_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		/* hook for low-level driver to program timings */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
1531 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1532 unsigned int xfer_shift)
1536 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1537 struct ata_device *dev = &ap->device[i];
1538 if (ata_dev_present(dev)) {
1539 dev->dma_mode = xfer_mode;
1540 dev->xfer_mode = xfer_mode;
1541 dev->xfer_shift = xfer_shift;
1542 if (ap->ops->set_dmamode)
1543 ap->ops->set_dmamode(ap, dev);
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
unsigned int i, xfer_shift;

/* step 1: always set host PIO timings */
rc = ata_host_set_pio(ap);

/* step 2: choose the best data xfer mode */
xfer_mode = xfer_shift = 0;
rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);

/* step 3: if that xfer mode isn't PIO, set host DMA timings */
if (xfer_shift != ATA_SHIFT_PIO)
	ata_host_set_dma(ap, xfer_mode, xfer_shift);

/* step 4: update devices' xfer mode */
ata_dev_set_mode(ap, &ap->device[0]);
ata_dev_set_mode(ap, &ap->device[1]);

/* either device may have failed, disabling the whole port */
if (ap->flags & ATA_FLAG_PORT_DISABLED)

/* give the low-level driver a chance to react to the final modes */
if (ap->ops->post_set_mode)
	ap->ops->post_set_mode(ap);

for (i = 0; i < 2; i++) {
	struct ata_device *dev = &ap->device[i];
	ata_dev_set_protocol(dev);

/* error path: give up on the entire port */
ata_port_disable(ap);
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (warn once this expires)
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  The returned status still having
 *	BSY set indicates the device never became ready.
 */
static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
unsigned long timer_start, timeout;

/* quick initial poll before committing to the sleep loops */
status = ata_busy_wait(ap, ATA_BUSY, 300);
timer_start = jiffies;
timeout = timer_start + tmout_pat;
/* phase 1: poll quietly until the "impatience" timeout expires */
while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
	status = ata_busy_wait(ap, ATA_BUSY, 3);

/* still busy after tmout_pat: warn the user once, then keep waiting */
if (status & ATA_BUSY)
	printk(KERN_WARNING "ata%u is slow to respond, "
	       "please be patient\n", ap->id);

/* phase 2: keep polling until the hard timeout */
timeout = timer_start + tmout;
while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
	status = ata_chk_status(ap);

if (status & ATA_BUSY) {
	printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
	       ap->id, tmout / HZ);
/* after a bus reset, wait for each device flagged in @devmask to clear
 * BSY, then leave device 0 selected
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int dev0 = devmask & (1 << 0);
unsigned int dev1 = devmask & (1 << 1);
unsigned long timeout;

/* if device 0 was found in ata_devchk, wait for its
 * BSY bit to clear
 */
ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

/* if device 1 was found in ata_devchk, wait for
 * register access, then wait for BSY to clear
 */
timeout = jiffies + ATA_TMOUT_BOOT;
ap->ops->dev_select(ap, 1);
/* device 1 accepts register access once nsect/lbal read back
 * the post-reset signature values (1, 1)
 */
if (ap->flags & ATA_FLAG_MMIO) {
	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);
	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);
if ((nsect == 1) && (lbal == 1))
if (time_after(jiffies, timeout)) {
msleep(50);	/* give drive a breather */
ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

/* is all this really necessary? */
ap->ops->dev_select(ap, 0);
ap->ops->dev_select(ap, 1);
ap->ops->dev_select(ap, 0);
/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static unsigned int ata_bus_edd(struct ata_port *ap)
struct ata_taskfile tf;

/* set up execute-device-diag (bus reset) taskfile */
/* also, take interrupts to a known state (disabled) */
DPRINTK("execute-device-diag\n");
ata_tf_init(ap, &tf, 0);
tf.command = ATA_CMD_EDD;
tf.protocol = ATA_PROT_NODATA;

/* write the taskfile to the device and start the command */
ata_tf_to_host(ap, &tf);

/* spec says at least 2ms. but who knows with those
 * crazy ATAPI devices...
 */

/* wait for BSY to clear; the final status is the caller's result */
return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* perform an SRST (software reset) sequence by pulsing the SRST bit in
 * the Device Control register, then wait for the devices in @devmask
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
struct ata_ioports *ioaddr = &ap->ioaddr;

DPRINTK("ata%u: bus reset via SRST\n", ap->id);

/* software reset. causes dev0 to be selected */
if (ap->flags & ATA_FLAG_MMIO) {
	writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	/* port-I/O variant of the same assert/deassert sequence */
	outb(ap->ctl, ioaddr->ctl_addr);
	outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	outb(ap->ctl, ioaddr->ctl_addr);

/* spec mandates ">= 2ms" before checking status.
 * We wait 150ms, because that was the magic delay used for
 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
 * between when the ATA command register is written, and then
 * status is checked.  Because waiting for "a while" before
 * checking status is fine, post SRST, we perform this magic
 * delay here as well.
 */

/* wait for the reset devices to become ready again */
ata_bus_post_reset(ap, devmask);
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

/* determine if device 0/1 are present */
if (ap->flags & ATA_FLAG_SATA_RESET)
dev0 = ata_devchk(ap, 0);
dev1 = ata_devchk(ap, 1);
devmask |= (1 << 0);
devmask |= (1 << 1);

/* select device 0 again */
ap->ops->dev_select(ap, 0);

/* issue bus reset */
if (ap->flags & ATA_FLAG_SRST)
	rc = ata_bus_softreset(ap, devmask);
else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
	/* set up device control */
	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	outb(ap->ctl, ioaddr->ctl_addr);
	/* fall back to EXECUTE DEVICE DIAGNOSTIC reset */
	rc = ata_bus_edd(ap);

/*
 * determine by signature whether we have ATA or ATAPI devices
 */
err = ata_dev_try_classify(ap, 0);
/* 0x81 in the dev0 error register means "dev1 failed diagnostics" */
if ((slave_possible) && (err != 0x81))
	ata_dev_try_classify(ap, 1);

/* re-enable interrupts */
if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */

/* is double-select really necessary? */
if (ap->device[1].class != ATA_DEV_NONE)
	ap->ops->dev_select(ap, 1);
if (ap->device[0].class != ATA_DEV_NONE)
	ap->ops->dev_select(ap, 0);

/* if no devices were detected, disable this port */
if ((ap->device[0].class == ATA_DEV_NONE) &&
    (ap->device[1].class == ATA_DEV_NONE))

if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
	/* set up device control for ATA_FLAG_SATA_RESET */
	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	outb(ap->ctl, ioaddr->ctl_addr);

/* error path: no usable devices, give up on the port */
printk(KERN_ERR "ata%u: disabling port\n", ap->id);
ap->ops->port_disable(ap);
/* warn the user that DMA will be disabled because @dev matched the
 * DMA blacklist
 */
static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
       ap->id, dev->devno);
/* IDENTIFY model strings of devices with known-broken DMA; compared
 * against the trimmed product string in ata_dma_blacklisted()
 */
static const char * ata_dma_blacklist [] = {
	"Toshiba CD-ROM XM-6202B",
	"E-IDE CD-ROM CR-840",
	"SAMSUNG CD-ROM SC-148C",
	"SAMSUNG CD-ROM SC",
	"SAMSUNG CD-ROM SN-124",
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
/* return nonzero if @dev's IDENTIFY product string appears in
 * ata_dma_blacklist[]
 */
static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
unsigned char model_num[40];

/* extract the product string from the IDENTIFY data */
ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
len = strnlen(s, sizeof(model_num));

/* ATAPI specifies that empty space is blank-filled; remove blanks */
while ((len > 0) && (s[len - 1] == ' ')) {

/* compare the trimmed string against every blacklist entry */
for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
	if (!strncmp(ata_dma_blacklist[i], s, len))
/* compute the bitmask of transfer modes (for the class selected by
 * @shift) supported by the host AND by every present device on @ap;
 * blacklisted devices have their DMA capability removed
 */
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
struct ata_device *master, *slave;

master = &ap->device[0];
slave = &ap->device[1];

assert (ata_dev_present(master) || ata_dev_present(slave));

if (shift == ATA_SHIFT_UDMA) {
	/* start from host capability, intersect with each device's
	 * IDENTIFY-reported UDMA modes
	 */
	mask = ap->udma_mask;
	if (ata_dev_present(master)) {
		mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
		if (ata_dma_blacklisted(ap, master)) {
			ata_pr_blacklisted(ap, master);
	if (ata_dev_present(slave)) {
		mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
		if (ata_dma_blacklisted(ap, slave)) {
			ata_pr_blacklisted(ap, slave);
else if (shift == ATA_SHIFT_MWDMA) {
	mask = ap->mwdma_mask;
	if (ata_dev_present(master)) {
		mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
		if (ata_dma_blacklisted(ap, master)) {
			ata_pr_blacklisted(ap, master);
	if (ata_dev_present(slave)) {
		mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
		if (ata_dma_blacklisted(ap, slave)) {
			ata_pr_blacklisted(ap, slave);
else if (shift == ATA_SHIFT_PIO) {
	mask = ap->pio_mask;
	if (ata_dev_present(master)) {
		/* spec doesn't return explicit support for
		 * PIO0-2, so we fake it
		 */
		u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
	if (ata_dev_present(slave)) {
		/* spec doesn't return explicit support for
		 * PIO0-2, so we fake it
		 */
		u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
/* unknown shift: should not happen */
mask = 0xffffffff;	/* shut up compiler warning */
2003 /* find greatest bit */
2004 static int fgb(u32 bitmap)
2009 for (i = 0; i < 32; i++)
2010 if (bitmap & (1 << i))
/**
 *	ata_choose_xfer_mode - attempt to find best transfer mode
 *	@ap: Port for which an xfer mode will be selected
 *	@xfer_mode_out: (output) SET FEATURES - XFER MODE code
 *	@xfer_shift_out: (output) bit shift that selects this mode
 *
 *	Based on host and device capabilities, determine the
 *	maximum transfer mode that is amenable to all.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_choose_xfer_mode(struct ata_port *ap,
				unsigned int *xfer_shift_out)
unsigned int mask, shift;

/* try each class, fastest first; use the first with a usable mode */
for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
	shift = xfer_mode_classes[i].shift;
	mask = ata_get_mode_mask(ap, shift);
	/* highest set bit in the mask is the best common mode */
	*xfer_mode_out = xfer_mode_classes[i].base + x;
	*xfer_shift_out = shift;
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@ap: Port associated with device @dev
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	and wait synchronously for its completion.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
DECLARE_COMPLETION(wait);
struct ata_queued_cmd *qc;
unsigned long flags;

/* set up set-features taskfile */
DPRINTK("set features - xfer mode\n");

qc = ata_qc_new_init(ap, dev);
qc->tf.command = ATA_CMD_SET_FEATURES;
qc->tf.feature = SETFEATURES_XFER;
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
qc->tf.protocol = ATA_PROT_NODATA;
/* the desired mode code travels in the sector count register */
qc->tf.nsect = dev->xfer_mode;

/* complete synchronously: block until the command finishes */
qc->waiting = &wait;
qc->complete_fn = ata_qc_complete_noop;

spin_lock_irqsave(&ap->host_set->lock, flags);
rc = ata_qc_issue(qc);
spin_unlock_irqrestore(&ap->host_set->lock, flags);

/* issue failure is fatal for the whole port */
ata_port_disable(ap);
wait_for_completion(&wait);
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg;
int dir = qc->dma_dir;

/* only commands that were actually DMA-mapped may be cleaned */
assert(qc->flags & ATA_QCFLAG_DMAMAP);
if (qc->flags & ATA_QCFLAG_SINGLE)
	assert(qc->n_elem == 1);

DPRINTK("unmapping %u sg elements\n", qc->n_elem);

/* unmap with the same primitive that created the mapping */
if (qc->flags & ATA_QCFLAG_SG)
	dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
		 sg_dma_len(&sg[0]), dir);

qc->flags &= ~ATA_QCFLAG_DMAMAP;
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
unsigned int idx, nelem;

assert(qc->n_elem > 0);

for (nelem = qc->n_elem; nelem; nelem--,sg++) {
	/* determine if physical DMA addr spans 64K boundary.
	 * Note h/w doesn't support 64-bit, so we unconditionally
	 * truncate dma_addr_t to u32.
	 */
	addr = (u32) sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	/* PRD entries must not cross a 64KB boundary; split if needed */
	offset = addr & 0xffff;
	if ((offset + sg_len) > 0x10000)
		len = 0x10000 - offset;

	ap->prd[idx].addr = cpu_to_le32(addr);
	/* a length field of 0 encodes 64KB per the PRD format */
	ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
	VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

/* mark the final entry so the controller stops there */
ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2186 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2187 * @qc: Metadata associated with taskfile to check
2189 * Allow low-level driver to filter ATA PACKET commands, returning
2190 * a status indicating whether or not it is OK to use DMA for the
2191 * supplied PACKET command.
2194 * spin_lock_irqsave(host_set lock)
2196 * RETURNS: 0 when ATAPI DMA can be used
2199 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2201 struct ata_port *ap = qc->ap;
2202 int rc = 0; /* Assume ATAPI DMA is OK by default */
2204 if (ap->ops->check_atapi_dma)
2205 rc = ap->ops->check_atapi_dma(qc);
2210 * ata_qc_prep - Prepare taskfile for submission
2211 * @qc: Metadata associated with taskfile to be prepared
2213 * Prepare ATA taskfile for submission.
2216 * spin_lock_irqsave(host_set lock)
2218 void ata_qc_prep(struct ata_queued_cmd *qc)
2220 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen:
 *	builds a one-entry scatter-gather list around @buf.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
struct scatterlist *sg;

/* mark this command as using the embedded single-entry sg */
qc->flags |= ATA_QCFLAG_SINGLE;

memset(&qc->sgent, 0, sizeof(qc->sgent));
qc->sg = &qc->sgent;

/* describe @buf as (page, offset, length) for the DMA layer */
sg->page = virt_to_page(buf);
sg->offset = (unsigned long) buf & ~PAGE_MASK;
sg->length = buflen;
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
/* mark this command as using a caller-provided sg table */
qc->flags |= ATA_QCFLAG_SG;
qc->n_elem = n_elem;
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
int dir = qc->dma_dir;
struct scatterlist *sg = qc->sg;
dma_addr_t dma_address;

/* map the single flat buffer for DMA */
dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
if (dma_mapping_error(dma_address))

/* record the mapping in the command's one-entry sg list */
sg_dma_address(sg) = dma_address;
sg_dma_len(sg) = sg->length;

DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
	qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg;

VPRINTK("ENTER, ata%u\n", ap->id);
assert(qc->flags & ATA_QCFLAG_SG);

/* the DMA layer may coalesce entries: record the mapped count */
n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);

DPRINTK("%d sg elements mapped\n", n_elem);
qc->n_elem = n_elem;
/*
 *	ata_pio_poll - poll device status while in a PIO polling state
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 *
 *	Returns the delay before the next poll should occur; transitions
 *	pio_task_state between the paired polling/non-polling states.
 */
static unsigned long ata_pio_poll(struct ata_port *ap)
unsigned int poll_state = PIO_ST_UNKNOWN;
unsigned int reg_state = PIO_ST_UNKNOWN;
const unsigned int tmout_state = PIO_ST_TMOUT;

/* pick the (poll, regular) state pair for the current phase */
switch (ap->pio_task_state) {
	poll_state = PIO_ST_POLL;
case PIO_ST_LAST_POLL:
	poll_state = PIO_ST_LAST_POLL;
	reg_state = PIO_ST_LAST;

status = ata_chk_status(ap);
if (status & ATA_BUSY) {
	/* still busy: fail on deadline, otherwise re-poll shortly */
	if (time_after(jiffies, ap->pio_task_timeout)) {
		ap->pio_task_state = tmout_state;
	ap->pio_task_state = poll_state;
	return ATA_SHORT_PAUSE;

/* BSY cleared: leave the polling state */
ap->pio_task_state = reg_state;
/*
 *	ata_pio_complete - finish a PIO command once the device goes idle
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 */
static void ata_pio_complete (struct ata_port *ap)
struct ata_queued_cmd *qc;

/*
 * This is purely hueristic.  This is a fast path.
 * Sometimes when we enter, BSY will be cleared in
 * a chk-status or two.  If not, the drive is probably seeking
 * or something.  Snooze for a couple msecs, then
 * chk-status again.  If still busy, fall back to
 * PIO_ST_POLL state.
 */
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
		ap->pio_task_state = PIO_ST_LAST_POLL;
		ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;

/* device idle; any error bits now indicate command failure */
drv_stat = ata_wait_idle(ap);
if (!ata_ok(drv_stat)) {
	ap->pio_task_state = PIO_ST_ERR;

qc = ata_qc_from_tag(ap, ap->active_tag);

/* command done: return the state machine to idle and complete */
ap->pio_task_state = PIO_ST_IDLE;
ata_qc_complete(qc, drv_stat);
2471 * @buf: Buffer to swap
2472 * @buf_words: Number of 16-bit words in buffer.
2474 * Swap halves of 16-bit words if needed to convert from
2475 * little-endian byte order to native cpu byte order, or
2480 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2485 for (i = 0; i < buf_words; i++)
2486 buf[i] = le16_to_cpu(buf[i]);
2487 #endif /* __BIG_ENDIAN */
/* transfer @buflen bytes between @buf and the device data register via
 * MMIO, one 16-bit word at a time; direction chosen by @write_data
 */
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
unsigned int words = buflen >> 1;
u16 *buf16 = (u16 *) buf;
void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

/* write path: device expects little-endian words on the wire */
for (i = 0; i < words; i++)
	writew(le16_to_cpu(buf16[i]), mmio);
/* read path: store words back in little-endian order */
for (i = 0; i < words; i++)
	buf16[i] = cpu_to_le16(readw(mmio));
2507 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2508 unsigned int buflen, int write_data)
2510 unsigned int dwords = buflen >> 1;
2513 outsw(ap->ioaddr.data_addr, buf, dwords);
2515 insw(ap->ioaddr.data_addr, buf, dwords);
2518 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2519 unsigned int buflen, int do_write)
2521 if (ap->flags & ATA_FLAG_MMIO)
2522 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2524 ata_pio_data_xfer(ap, buf, buflen, do_write);
/* transfer the next sector of a PIO data command to/from the current
 * scatter-gather entry, advancing the cursor state in @qc
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
unsigned int offset;

/* last sector: move the state machine to the completion phase */
if (qc->cursect == (qc->nsect - 1))
	ap->pio_task_state = PIO_ST_LAST;

page = sg[qc->cursg].page;
offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;

/* sg pages may be highmem: kmap for CPU access */
buf = kmap(page) + offset;

/* advance to the next sg entry once this one is exhausted */
if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {

DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

/* do the actual data transfer */
do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/* transfer @bytes of ATAPI PIO data to/from the command's sg list,
 * advancing the byte/sg cursors in @qc
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
unsigned int offset, count;

/* this chunk finishes the command: go to the completion phase */
if (qc->curbytes == qc->nbytes - bytes)
	ap->pio_task_state = PIO_ST_LAST;

sg = &qc->sg[qc->cursg];
offset = sg->offset + qc->cursg_ofs;

/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;

/* transfer no more than remains in this sg entry */
count = min(sg->length - qc->cursg_ofs, bytes);

/* don't cross page boundaries */
count = min(count, (unsigned int)PAGE_SIZE - offset);

/* sg pages may be highmem: kmap for CPU access */
buf = kmap(page) + offset;

qc->curbytes += count;
qc->cursg_ofs += count;

/* current sg entry exhausted: step to the next one */
if (qc->cursg_ofs == sg->length) {

DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

/* do the actual data transfer */
ata_data_xfer(ap, buf, count, do_write);

/* more bytes requested than this entry held: continue in the next */
if (qc->cursg_ofs < sg->length)
/* read the device-announced byte count and direction from the taskfile
 * shadow registers, validate them, and transfer that many bytes
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
unsigned int ireason, bc_lo, bc_hi, bytes;
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

/* device reports chunk size in the cylinder (lbam/lbah) registers */
ap->ops->tf_read(ap, &qc->tf);
ireason = qc->tf.nsect;
bc_lo = qc->tf.lbam;
bc_hi = qc->tf.lbah;
bytes = (bc_hi << 8) | bc_lo;

/* shall be cleared to zero, indicating xfer of data */
if (ireason & (1 << 0))

/* make sure transfer direction matches expected */
i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
if (do_write != i_write)

__atapi_pio_bytes(qc, bytes);

/* error path: protocol violation by the device */
printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
       ap->id, dev->devno);
ap->pio_task_state = PIO_ST_ERR;
/*
 *	ata_pio_block - transfer the next block of a PIO command
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 */
static void ata_pio_block(struct ata_port *ap)
struct ata_queued_cmd *qc;

/*
 * This is purely hueristic.  This is a fast path.
 * Sometimes when we enter, BSY will be cleared in
 * a chk-status or two.  If not, the drive is probably seeking
 * or something.  Snooze for a couple msecs, then
 * chk-status again.  If still busy, fall back to
 * PIO_ST_POLL state.
 */
status = ata_busy_wait(ap, ATA_BUSY, 5);
if (status & ATA_BUSY) {
	status = ata_busy_wait(ap, ATA_BUSY, 10);
	if (status & ATA_BUSY) {
		ap->pio_task_state = PIO_ST_POLL;
		ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;

qc = ata_qc_from_tag(ap, ap->active_tag);

if (is_atapi_taskfile(&qc->tf)) {
	/* no more data to transfer or unsupported ATAPI command */
	if ((status & ATA_DRQ) == 0) {
		ap->pio_task_state = PIO_ST_IDLE;
		ata_qc_complete(qc, status);

	atapi_pio_bytes(qc);
	/* ATA path: handle BSY=0, DRQ=0 as error */
	if ((status & ATA_DRQ) == 0) {
		ap->pio_task_state = PIO_ST_ERR;
/* terminal error state of the PIO state machine: log the device status
 * and complete the active command with an error indication
 */
static void ata_pio_error(struct ata_port *ap)
struct ata_queued_cmd *qc;

qc = ata_qc_from_tag(ap, ap->active_tag);

drv_stat = ata_chk_status(ap);
printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",

/* return the state machine to idle before completing */
ap->pio_task_state = PIO_ST_IDLE;
/* force the error bit so completion sees a failure */
ata_qc_complete(qc, drv_stat | ATA_ERR);
/* workqueue entry point driving the PIO state machine for one port;
 * dispatches on pio_task_state and re-queues itself while work remains
 */
static void ata_pio_task(void *_data)
struct ata_port *ap = _data;
unsigned long timeout = 0;

switch (ap->pio_task_state) {
	ata_pio_complete(ap);

case PIO_ST_LAST_POLL:
	timeout = ata_pio_poll(ap);

/* re-arm: delayed re-queue when the poll asked for a pause */
queue_delayed_work(ata_wq, &ap->pio_task,
queue_work(ata_wq, &ap->pio_task);
/* synchronously issue an ATAPI REQUEST SENSE for @dev, filling the
 * SCSI command's sense buffer; used from the error-handling path
 */
static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
				struct scsi_cmnd *cmd)
DECLARE_COMPLETION(wait);
struct ata_queued_cmd *qc;
unsigned long flags;

DPRINTK("ATAPI request sense\n");

qc = ata_qc_new_init(ap, dev);

/* FIXME: is this needed? */
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

/* sense data lands directly in the SCSI command's sense buffer */
ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
qc->dma_dir = DMA_FROM_DEVICE;

/* build the REQUEST SENSE CDB */
memset(&qc->cdb, 0, ap->cdb_len);
qc->cdb[0] = REQUEST_SENSE;
qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;

qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
qc->tf.command = ATA_CMD_PACKET;

qc->tf.protocol = ATA_PROT_ATAPI;
/* byte-count limit advertised to the device in lbam/lbah */
qc->tf.lbam = (8 * 1024) & 0xff;
qc->tf.lbah = (8 * 1024) >> 8;
qc->nbytes = SCSI_SENSE_BUFFERSIZE;

/* complete synchronously: block until the command finishes */
qc->waiting = &wait;
qc->complete_fn = ata_qc_complete_noop;

spin_lock_irqsave(&ap->host_set->lock, flags);
rc = ata_qc_issue(qc);
spin_unlock_irqrestore(&ap->host_set->lock, flags);

/* issue failure is fatal for the whole port */
ata_port_disable(ap);
wait_for_completion(&wait);
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
u8 host_stat = 0, drv_stat;

/* FIXME: doesn't this conflict with timeout handling? */
if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
	struct scsi_cmnd *cmd = qc->scsicmd;

	if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {

		/* finish completing original command */
		__ata_qc_complete(qc);

		/* fetch sense data so the SCSI layer sees the real error */
		atapi_request_sense(ap, dev, cmd);

		cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
		scsi_finish_command(cmd);

/* hack alert!  We cannot use the supplied completion
 * function from inside the ->eh_strategy_handler() thread.
 * libata is the only user of ->eh_strategy_handler() in
 * any kernel, so the default scsi_done() assumes it is
 * not being called from the SCSI EH.
 */
qc->scsidone = scsi_finish_command;

switch (qc->tf.protocol) {
case ATA_PROT_ATAPI_DMA:
	host_stat = ap->ops->bmdma_status(ap);

	/* before we do anything else, clear DMA-Start bit */
	ap->ops->bmdma_stop(ap);

/* reading status also clears a pending device interrupt */
drv_stat = ata_chk_status(ap);

/* ack bmdma irq events */
ap->ops->irq_clear(ap);

printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
       ap->id, qc->tf.command, drv_stat, host_stat);

/* complete taskfile transaction */
ata_qc_complete(qc, drv_stat);
/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
struct ata_queued_cmd *qc;

/* look up the command the timeout refers to; a missing command
 * indicates a driver bug
 */
qc = ata_qc_from_tag(ap, ap->active_tag);
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with the request
 *
 *	Claims the first free tag on @ap (atomically, via qactive bitmap)
 *	and returns the corresponding queued-command slot.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
struct ata_queued_cmd *qc = NULL;

/* first zero bit in qactive is the first free tag */
for (i = 0; i < ATA_MAX_QUEUE; i++)
	if (!test_and_set_bit(i, &ap->qactive)) {
		qc = ata_qc_from_tag(ap, i);
2955 * ata_qc_new_init - Request an available ATA command, and initialize it
2956 * @ap: Port associated with device @dev
2957 * @dev: Device from whom we request an available command structure
2963 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2964 struct ata_device *dev)
2966 struct ata_queued_cmd *qc;
/* allocate a free command slot; NOTE(review): NULL-check of qc is not
 * visible in this listing — presumably present in the full source */
2968 qc = ata_qc_new(ap);
/* reset per-command scatter/gather and byte-count bookkeeping */
2975 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2977 qc->nbytes = qc->curbytes = 0;
/* initialize the taskfile for the target device number */
2979 ata_tf_init(ap, &qc->tf, dev->devno);
2981 if (dev->flags & ATA_DFLAG_LBA48)
2982 qc->tf.flags |= ATA_TFLAG_LBA48;
/* No-op completion callback for commands needing no extra completion work.
 * NOTE(review): the body (presumably just "return 0;") is not visible in
 * this listing. */
2988 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/* Tear down a queued command: poison its tag, wake any synchronous waiter,
 * and release the tag bit so the slot can be reused. */
2993 static void __ata_qc_complete(struct ata_queued_cmd *qc)
2995 struct ata_port *ap = qc->ap;
2996 unsigned int tag, do_clear = 0;
/* NOTE(review): the "tag = qc->tag;" assignment is not visible here */
3000 if (likely(ata_tag_valid(tag))) {
3001 if (tag == ap->active_tag)
3002 ap->active_tag = ATA_TAG_POISON;
3003 qc->tag = ATA_TAG_POISON;
/* if someone sleeps on qc->waiting, signal completion */
3008 struct completion *waiting = qc->waiting;
/* free the tag last, after all other teardown is done */
3013 if (likely(do_clear))
3014 clear_bit(tag, &ap->qactive);
3018 * ata_qc_free - free unused ata_queued_cmd
3019 * @qc: Command to complete
3021 * Designed to free unused ata_queued_cmd object
3022 * in case something prevents using it.
3025 * spin_lock_irqsave(host_set lock)
3028 void ata_qc_free(struct ata_queued_cmd *qc)
3030 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3031 assert(qc->waiting == NULL); /* nothing should be waiting */
3033 __ata_qc_complete(qc);
3037 * ata_qc_complete - Complete an active ATA command
3038 * @qc: Command to complete
3039 * @drv_stat: ATA Status register contents
3041 * Indicate to the mid and upper layers that an ATA
3042 * command has completed, with either an ok or not-ok status.
3045 * spin_lock_irqsave(host_set lock)
3049 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3053 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3054 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3056 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3059 /* call completion callback */
3060 rc = qc->complete_fn(qc, drv_stat);
3061 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3063 /* if callback indicates not to complete command (non-zero),
3064 * return immediately
3069 __ata_qc_complete(qc);
/* Decide whether ata_qc_issue() must DMA-map the command's buffer:
 * DMA protocols always map; PIO-family protocols map only when the port
 * advertises ATA_FLAG_PIO_DMA (controller DMAs even for PIO commands). */
3074 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3076 struct ata_port *ap = qc->ap;
3078 switch (qc->tf.protocol) {
3080 case ATA_PROT_ATAPI_DMA:
/* NOTE(review): additional case labels (e.g. ATA_PROT_DMA) and the
 * "return 1/return 0" lines are not visible in this listing. */
3083 case ATA_PROT_ATAPI:
3085 case ATA_PROT_PIO_MULT:
3086 if (ap->flags & ATA_FLAG_PIO_DMA)
3099 * ata_qc_issue - issue taskfile to device
3100 * @qc: command to issue to device
3102 * Prepare an ATA command to submission to device.
3103 * This includes mapping the data into a DMA-able
3104 * area, filling in the S/G table, and finally
3105 * writing the taskfile to hardware, starting the command.
3108 * spin_lock_irqsave(host_set lock)
3111 * Zero on success, negative on error.
3114 int ata_qc_issue(struct ata_queued_cmd *qc)
3116 struct ata_port *ap = qc->ap;
3118 if (ata_should_dma_map(qc)) {
3119 if (qc->flags & ATA_QCFLAG_SG) {
3120 if (ata_sg_setup(qc))
3122 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3123 if (ata_sg_setup_one(qc))
3127 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3130 ap->ops->qc_prep(qc);
3132 qc->ap->active_tag = qc->tag;
3133 qc->flags |= ATA_QCFLAG_ACTIVE;
3135 return ap->ops->qc_issue(qc);
3143 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3144 * @qc: command to issue to device
3146 * Using various libata functions and hooks, this function
3147 * starts an ATA command. ATA commands are grouped into
3148 * classes called "protocols", and issuing each type of protocol
3149 * is slightly different.
3151 * May be used as the qc_issue() entry in ata_port_operations.
3154 * spin_lock_irqsave(host_set lock)
3157 * Zero on success, negative on error.
3160 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3162 struct ata_port *ap = qc->ap;
3164 ata_dev_select(ap, qc->dev->devno, 1, 0);
3166 switch (qc->tf.protocol) {
3167 case ATA_PROT_NODATA:
3168 ata_tf_to_host_nolock(ap, &qc->tf);
3172 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3173 ap->ops->bmdma_setup(qc); /* set up bmdma */
3174 ap->ops->bmdma_start(qc); /* initiate bmdma */
3177 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3178 ata_qc_set_polling(qc);
3179 ata_tf_to_host_nolock(ap, &qc->tf);
3180 ap->pio_task_state = PIO_ST;
3181 queue_work(ata_wq, &ap->pio_task);
3184 case ATA_PROT_ATAPI:
3185 ata_qc_set_polling(qc);
3186 ata_tf_to_host_nolock(ap, &qc->tf);
3187 queue_work(ata_wq, &ap->packet_task);
3190 case ATA_PROT_ATAPI_NODATA:
3191 ata_tf_to_host_nolock(ap, &qc->tf);
3192 queue_work(ata_wq, &ap->packet_task);
3195 case ATA_PROT_ATAPI_DMA:
3196 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3197 ap->ops->bmdma_setup(qc); /* set up bmdma */
3198 queue_work(ata_wq, &ap->packet_task);
3210 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3211 * @qc: Info associated with this ATA transaction.
3214 * spin_lock_irqsave(host_set lock)
3217 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3219 struct ata_port *ap = qc->ap;
3220 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3222 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3224 /* load PRD table addr. */
3225 mb(); /* make sure PRD table writes are visible to controller */
3226 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3228 /* specify data direction, triple-check start bit is clear */
3229 dmactl = readb(mmio + ATA_DMA_CMD);
3230 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3232 dmactl |= ATA_DMA_WR;
3233 writeb(dmactl, mmio + ATA_DMA_CMD);
3235 /* issue r/w command */
3236 ap->ops->exec_command(ap, &qc->tf);
3240 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3241 * @qc: Info associated with this ATA transaction.
3244 * spin_lock_irqsave(host_set lock)
3247 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3249 struct ata_port *ap = qc->ap;
3250 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3253 /* start host DMA transaction */
3254 dmactl = readb(mmio + ATA_DMA_CMD);
3255 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3257 /* Strictly, one may wish to issue a readb() here, to
3258 * flush the mmio write. However, control also passes
3259 * to the hardware at this point, and it will interrupt
3260 * us when we are to resume control. So, in effect,
3261 * we don't care when the mmio write flushes.
3262 * Further, a read of the DMA status register _immediately_
3263 * following the write may not be what certain flaky hardware
3264 * is expected, so I think it is best to not add a readb()
3265 * without first all the MMIO ATA cards/mobos.
3266 * Or maybe I'm just being paranoid.
3271 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3272 * @qc: Info associated with this ATA transaction.
3275 * spin_lock_irqsave(host_set lock)
3278 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3280 struct ata_port *ap = qc->ap;
3281 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3284 /* load PRD table addr. */
3285 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3287 /* specify data direction, triple-check start bit is clear */
3288 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3289 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3291 dmactl |= ATA_DMA_WR;
3292 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3294 /* issue r/w command */
3295 ap->ops->exec_command(ap, &qc->tf);
3299 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3300 * @qc: Info associated with this ATA transaction.
3303 * spin_lock_irqsave(host_set lock)
3306 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3308 struct ata_port *ap = qc->ap;
3311 /* start host DMA transaction */
3312 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3313 outb(dmactl | ATA_DMA_START,
3314 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3319 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3320 * @qc: Info associated with this ATA transaction.
3322 * Writes the ATA_DMA_START flag to the DMA command register.
3324 * May be used as the bmdma_start() entry in ata_port_operations.
3327 * spin_lock_irqsave(host_set lock)
3329 void ata_bmdma_start(struct ata_queued_cmd *qc)
3331 if (qc->ap->flags & ATA_FLAG_MMIO)
3332 ata_bmdma_start_mmio(qc);
3334 ata_bmdma_start_pio(qc);
3339 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3340 * @qc: Info associated with this ATA transaction.
3342 * Writes address of PRD table to device's PRD Table Address
3343 * register, sets the DMA control register, and calls
3344 * ops->exec_command() to start the transfer.
3346 * May be used as the bmdma_setup() entry in ata_port_operations.
3349 * spin_lock_irqsave(host_set lock)
3351 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3353 if (qc->ap->flags & ATA_FLAG_MMIO)
3354 ata_bmdma_setup_mmio(qc);
3356 ata_bmdma_setup_pio(qc);
3361 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
3362 * @ap: Port associated with this ATA transaction.
3364 * Clear interrupt and error flags in DMA status register.
3366 * May be used as the irq_clear() entry in ata_port_operations.
3369 * spin_lock_irqsave(host_set lock)
3372 void ata_bmdma_irq_clear(struct ata_port *ap)
3374 if (ap->flags & ATA_FLAG_MMIO) {
3375 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3376 writeb(readb(mmio), mmio);
3378 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3379 outb(inb(addr), addr);
3386 * ata_bmdma_status - Read PCI IDE BMDMA status
3387 * @ap: Port associated with this ATA transaction.
3389 * Read and return BMDMA status register.
3391 * May be used as the bmdma_status() entry in ata_port_operations.
3394 * spin_lock_irqsave(host_set lock)
3397 u8 ata_bmdma_status(struct ata_port *ap)
3400 if (ap->flags & ATA_FLAG_MMIO) {
3401 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3402 host_stat = readb(mmio + ATA_DMA_STATUS);
3404 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3410 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3411 * @ap: Port associated with this ATA transaction.
3413 * Clears the ATA_DMA_START flag in the dma control register
3415 * May be used as the bmdma_stop() entry in ata_port_operations.
3418 * spin_lock_irqsave(host_set lock)
3421 void ata_bmdma_stop(struct ata_port *ap)
3423 if (ap->flags & ATA_FLAG_MMIO) {
3424 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3426 /* clear start/stop bit */
3427 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3428 mmio + ATA_DMA_CMD);
3430 /* clear start/stop bit */
3431 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3432 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3435 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3436 ata_altstatus(ap); /* dummy read */
3440 * ata_host_intr - Handle host interrupt for given (port, task)
3441 * @ap: Port on which interrupt arrived (possibly...)
3442 * @qc: Taskfile currently active in engine
3444 * Handle host interrupt for given queued command. Currently,
3445 * only DMA interrupts are handled. All other commands are
3446 * handled via polling with interrupts disabled (nIEN bit).
3449 * spin_lock_irqsave(host_set lock)
3452 * One if interrupt was handled, zero if not (shared irq).
3455 inline unsigned int ata_host_intr (struct ata_port *ap,
3456 struct ata_queued_cmd *qc)
3458 u8 status, host_stat;
3460 switch (qc->tf.protocol) {
/* NOTE(review): additional case labels (e.g. ATA_PROT_DMA) and the
 * "goto idle_irq"/fallthrough lines are not visible in this listing. */
3463 case ATA_PROT_ATAPI_DMA:
3464 case ATA_PROT_ATAPI:
3465 /* check status of DMA engine */
3466 host_stat = ap->ops->bmdma_status(ap);
3467 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3469 /* if it's not our irq... */
3470 if (!(host_stat & ATA_DMA_INTR))
3473 /* before we do anything else, clear DMA-Start bit */
3474 ap->ops->bmdma_stop(ap);
3478 case ATA_PROT_ATAPI_NODATA:
3479 case ATA_PROT_NODATA:
3480 /* check altstatus */
3481 status = ata_altstatus(ap);
3482 if (status & ATA_BUSY)
3485 /* check main status, clearing INTRQ */
3486 status = ata_chk_status(ap);
3487 if (unlikely(status & ATA_BUSY))
3489 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3490 ap->id, qc->tf.protocol, status);
3492 /* ack bmdma irq events */
3493 ap->ops->irq_clear(ap);
3495 /* complete taskfile transaction */
3496 ata_qc_complete(qc, status);
3503 return 1; /* irq handled */
/* idle-irq bookkeeping: counts spurious interrupts on this port */
3506 ap->stats.idle_irq++;
3509 if ((ap->stats.idle_irq % 1000) == 0) {
3511 ata_irq_ack(ap, 0); /* debug trap */
3512 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3515 return 0; /* irq not handled */
3519 * ata_interrupt - Default ATA host interrupt handler
3520 * @irq: irq line (unused)
3521 * @dev_instance: pointer to our ata_host_set information structure
3524 * Default interrupt handler for PCI IDE devices. Calls
3525 * ata_host_intr() for each port that is not disabled.
3528 * Obtains host_set lock during operation.
3531 * IRQ_NONE or IRQ_HANDLED.
3535 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3537 struct ata_host_set *host_set = dev_instance;
3539 unsigned int handled = 0;
3540 unsigned long flags;
3542 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3543 spin_lock_irqsave(&host_set->lock, flags);
3545 for (i = 0; i < host_set->n_ports; i++) {
3546 struct ata_port *ap;
3548 ap = host_set->ports[i];
3549 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3550 struct ata_queued_cmd *qc;
3552 qc = ata_qc_from_tag(ap, ap->active_tag);
3553 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
3554 (qc->flags & ATA_QCFLAG_ACTIVE))
3555 handled |= ata_host_intr(ap, qc);
3559 spin_unlock_irqrestore(&host_set->lock, flags);
3561 return IRQ_RETVAL(handled);
3565 * atapi_packet_task - Write CDB bytes to hardware
3566 * @_data: Port to which ATAPI device is attached.
3568 * When device has indicated its readiness to accept
3569 * a CDB, this function is called. Send the CDB.
3570 * If DMA is to be performed, exit immediately.
3571 * Otherwise, we are in polling mode, so poll
3572 * status until the operation succeeds or fails.
3575 * Kernel thread context (may sleep)
3578 static void atapi_packet_task(void *_data)
3580 struct ata_port *ap = _data;
3581 struct ata_queued_cmd *qc;
3584 qc = ata_qc_from_tag(ap, ap->active_tag);
3586 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3588 /* sleep-wait for BSY to clear */
3589 DPRINTK("busy wait\n");
/* NOTE(review): the error-path goto targets and the final "return" /
 * err_out label structure are not visible in this listing. */
3590 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3593 /* make sure DRQ is set */
3594 status = ata_chk_status(ap);
3595 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3599 DPRINTK("send cdb\n");
3600 assert(ap->cdb_len >= 12);
3601 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3603 /* if we are DMA'ing, irq handler takes over from here */
3604 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3605 ap->ops->bmdma_start(qc); /* initiate bmdma */
3607 /* non-data commands are also handled via irq */
3608 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3612 /* PIO commands are handled by polling */
3614 ap->pio_task_state = PIO_ST;
3615 queue_work(ata_wq, &ap->pio_task);
/* error path: complete the command with ATA_ERR status */
3621 ata_qc_complete(qc, ATA_ERR);
3626 * ata_port_start - Set port up for dma.
3627 * @ap: Port to initialize
3629 * Called just after data structures for each port are
3630 * initialized. Allocates space for PRD table.
3632 * May be used as the port_start() entry in ata_port_operations.
3637 int ata_port_start (struct ata_port *ap)
3639 struct device *dev = ap->host_set->dev;
3641 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3645 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3652 * ata_port_stop - Undo ata_port_start()
3653 * @ap: Port to shut down
3655 * Frees the PRD table.
3657 * May be used as the port_stop() entry in ata_port_operations.
3662 void ata_port_stop (struct ata_port *ap)
3664 struct device *dev = ap->host_set->dev;
3666 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3669 void ata_host_stop (struct ata_host_set *host_set)
3671 if (host_set->mmio_base)
3672 iounmap(host_set->mmio_base);
3677 * ata_host_remove - Unregister SCSI host structure with upper layers
3678 * @ap: Port to unregister
3679 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3684 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3686 struct Scsi_Host *sh = ap->host;
/* NOTE(review): the "if (do_unregister)" guard around scsi_remove_host()
 * is not visible in this listing. */
3691 scsi_remove_host(sh);
3693 ap->ops->port_stop(ap);
3697 * ata_host_init - Initialize an ata_port structure
3698 * @ap: Structure to initialize
3699 * @host: associated SCSI mid-layer structure
3700 * @host_set: Collection of hosts to which @ap belongs
3701 * @ent: Probe information provided by low-level driver
3702 * @port_no: Port number associated with this ata_port
3704 * Initialize a new ata_port structure, and its associated
3708 * Inherited from caller.
3712 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3713 struct ata_host_set *host_set,
3714 struct ata_probe_ent *ent, unsigned int port_no)
/* SCSI mid-layer view of this port */
3720 host->max_channel = 1;
3721 host->unique_id = ata_unique_id++;
3722 host->max_cmd_len = 12;
3723 scsi_set_device(host, ent->dev);
3724 scsi_assign_lock(host, &host_set->lock);
/* ata_port bookkeeping, copied from the probe entry */
3726 ap->flags = ATA_FLAG_PORT_DISABLED;
3727 ap->id = host->unique_id;
3729 ap->ctl = ATA_DEVCTL_OBS;
3730 ap->host_set = host_set;
3731 ap->port_no = port_no;
/* NOTE(review): orphan ternary — the "ap->hard_port_no =" left-hand side
 * is not visible in this listing. */
3733 ent->legacy_mode ? ent->hard_port_no : port_no;
3734 ap->pio_mask = ent->pio_mask;
3735 ap->mwdma_mask = ent->mwdma_mask;
3736 ap->udma_mask = ent->udma_mask;
3737 ap->flags |= ent->host_flags;
3738 ap->ops = ent->port_ops;
3739 ap->cbl = ATA_CBL_NONE;
3740 ap->active_tag = ATA_TAG_POISON;
3741 ap->last_ctl = 0xFF;
/* deferred-work contexts for ATAPI CDB delivery and polled PIO */
3743 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3744 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3746 for (i = 0; i < ATA_MAX_DEVICES; i++)
3747 ap->device[i].devno = i;
/* start at 1 so the %1000 debug-trap check doesn't fire immediately */
3750 ap->stats.unhandled_irq = 1;
3751 ap->stats.idle_irq = 1;
3754 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3758 * ata_host_add - Attach low-level ATA driver to system
3759 * @ent: Information provided by low-level driver
3760 * @host_set: Collections of ports to which we add
3761 * @port_no: Port number associated with this host
3763 * Attach low-level ATA driver to system.
3766 * PCI/etc. bus probe sem.
3769 * New ata_port on success, NULL on error.
3773 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3774 struct ata_host_set *host_set,
3775 unsigned int port_no)
3777 struct Scsi_Host *host;
3778 struct ata_port *ap;
/* NOTE(review): NULL check on the allocation and the rc error branch
 * are not visible in this listing. */
3782 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
/* ata_port lives in the hostdata area of the SCSI host */
3786 ap = (struct ata_port *) &host->hostdata[0];
3788 ata_host_init(ap, host, host_set, ent, port_no);
3790 rc = ap->ops->port_start(ap);
/* error path: drop the SCSI host reference */
3797 scsi_host_put(host);
3802 * ata_device_add - Register hardware device with ATA and SCSI layers
3803 * @ent: Probe information describing hardware device to be registered
3805 * This function processes the information provided in the probe
3806 * information struct @ent, allocates the necessary ATA and SCSI
3807 * host information structures, initializes them, and registers
3808 * everything with requisite kernel subsystems.
3810 * This function requests irqs, probes the ATA bus, and probes
3814 * PCI/etc. bus probe sem.
3817 * Number of ports registered. Zero on error (no ports registered).
3821 int ata_device_add(struct ata_probe_ent *ent)
3823 unsigned int count = 0, i;
3824 struct device *dev = ent->dev;
3825 struct ata_host_set *host_set;
3828 /* alloc a container for our list of ATA ports (buses) */
3829 host_set = kmalloc(sizeof(struct ata_host_set) +
3830 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
/* NOTE(review): the NULL check between kmalloc and memset is not
 * visible in this listing. */
3833 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3834 spin_lock_init(&host_set->lock);
3836 host_set->dev = dev;
3837 host_set->n_ports = ent->n_ports;
3838 host_set->irq = ent->irq;
3839 host_set->mmio_base = ent->mmio_base;
3840 host_set->private_data = ent->private_data;
3841 host_set->ops = ent->port_ops;
3843 /* register each port bound to this device */
3844 for (i = 0; i < ent->n_ports; i++) {
3845 struct ata_port *ap;
3846 unsigned long xfer_mode_mask;
3848 ap = ata_host_add(ent, host_set, i);
3852 host_set->ports[i] = ap;
3853 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3854 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3855 (ap->pio_mask << ATA_SHIFT_PIO);
3857 /* print per-port info to dmesg */
3858 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3859 "bmdma 0x%lX irq %lu\n",
3861 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3862 ata_mode_string(xfer_mode_mask),
3863 ap->ioaddr.cmd_addr,
3864 ap->ioaddr.ctl_addr,
3865 ap->ioaddr.bmdma_addr,
/* clear any stale interrupt state before requesting the irq */
3869 host_set->ops->irq_clear(ap);
3878 /* obtain irq, that is shared between channels */
3879 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3880 DRV_NAME, host_set))
3883 /* perform each probe synchronously */
3884 DPRINTK("probe begin\n");
3885 for (i = 0; i < count; i++) {
3886 struct ata_port *ap;
3889 ap = host_set->ports[i];
3891 DPRINTK("ata%u: probe begin\n", ap->id);
3892 rc = ata_bus_probe(ap);
3893 DPRINTK("ata%u: probe end\n", ap->id);
3896 /* FIXME: do something useful here?
3897 * Current libata behavior will
3898 * tear down everything when
3899 * the module is removed
3900 * or the h/w is unplugged.
3904 rc = scsi_add_host(ap->host, dev);
3906 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3908 /* FIXME: do something useful here */
3909 /* FIXME: handle unconditional calls to
3910 * scsi_scan_host and ata_host_remove, below,
3916 /* probes are done, now scan each port's disk(s) */
3917 DPRINTK("probe begin\n");
3918 for (i = 0; i < count; i++) {
3919 struct ata_port *ap = host_set->ports[i];
3921 scsi_scan_host(ap->host);
3924 dev_set_drvdata(dev, host_set);
3926 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3927 return ent->n_ports; /* success */
/* error path: tear down every port registered so far */
3930 for (i = 0; i < count; i++) {
3931 ata_host_remove(host_set->ports[i], 1);
3932 scsi_host_put(host_set->ports[i]->host);
3935 VPRINTK("EXIT, returning 0\n");
3940 * ata_scsi_release - SCSI layer callback hook for host unload
3941 * @host: libata host to be unloaded
3943 * Performs all duties necessary to shut down a libata port...
3944 * Kill port kthread, disable port, and release resources.
3947 * Inherited from SCSI layer.
3953 int ata_scsi_release(struct Scsi_Host *host)
3955 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3959 ap->ops->port_disable(ap);
3960 ata_host_remove(ap, 0);
3967 * ata_std_ports - initialize ioaddr with standard port offsets.
3968 * @ioaddr: IO address structure to be initialized
3970 * Utility function which initializes data_addr, error_addr,
3971 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
3972 * device_addr, status_addr, and command_addr to standard offsets
3973 * relative to cmd_addr.
3975 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
3978 void ata_std_ports(struct ata_ioports *ioaddr)
3980 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3981 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3982 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3983 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3984 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3985 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3986 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3987 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3988 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3989 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/* Allocate and zero an ata_probe_ent, pre-populated from @port's
 * template (sht, flags, transfer-mode masks, port ops). */
3992 static struct ata_probe_ent *
3993 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
3995 struct ata_probe_ent *probe_ent;
3997 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
/* allocation-failure branch; NOTE(review): the "return NULL" line is
 * not visible in this listing */
3999 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4000 kobject_name(&(dev->kobj)));
4004 memset(probe_ent, 0, sizeof(*probe_ent));
4006 INIT_LIST_HEAD(&probe_ent->node);
4007 probe_ent->dev = dev;
4009 probe_ent->sht = port->sht;
4010 probe_ent->host_flags = port->host_flags;
4011 probe_ent->pio_mask = port->pio_mask;
4012 probe_ent->mwdma_mask = port->mwdma_mask;
4013 probe_ent->udma_mask = port->udma_mask;
4014 probe_ent->port_ops = port->port_ops;
4022 * ata_pci_init_native_mode - Initialize native-mode driver
4023 * @pdev: pci device to be initialized
4024 * @port: array[2] of pointers to port info structures.
4026 * Utility function which allocates and initializes an
4027 * ata_probe_ent structure for a standard dual-port
4028 * PIO-based IDE controller. The returned ata_probe_ent
4029 * structure can be passed to ata_device_add(). The returned
4030 * ata_probe_ent structure should then be freed with kfree().
4034 struct ata_probe_ent *
4035 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
4037 struct ata_probe_ent *probe_ent =
4038 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
/* NOTE(review): the NULL check on probe_ent and final "return probe_ent;"
 * are not visible in this listing. */
4042 probe_ent->n_ports = 2;
4043 probe_ent->irq = pdev->irq;
4044 probe_ent->irq_flags = SA_SHIRQ;
/* native mode: taskfile/ctl/bmdma addresses come from PCI BARs 0-4 */
4046 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
4047 probe_ent->port[0].altstatus_addr =
4048 probe_ent->port[0].ctl_addr =
4049 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4050 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4052 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
4053 probe_ent->port[1].altstatus_addr =
4054 probe_ent->port[1].ctl_addr =
4055 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
/* second channel's BMDMA registers sit 8 bytes into BAR4 */
4056 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4058 ata_std_ports(&probe_ent->port[0]);
4059 ata_std_ports(&probe_ent->port[1]);
/* Build two single-port probe entries for a legacy-mode IDE controller:
 * primary channel at 0x1f0/irq14, secondary at 0x170/irq15.  The second
 * entry is returned through @ppe2. */
4064 static struct ata_probe_ent *
4065 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4066 struct ata_probe_ent **ppe2)
4068 struct ata_probe_ent *probe_ent, *probe_ent2;
4070 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4073 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
/* NOTE(review): NULL checks / cleanup for the two allocations are not
 * visible in this listing. */
4079 probe_ent->n_ports = 1;
4080 probe_ent->irq = 14;
4082 probe_ent->hard_port_no = 0;
4083 probe_ent->legacy_mode = 1;
4085 probe_ent2->n_ports = 1;
4086 probe_ent2->irq = 15;
4088 probe_ent2->hard_port_no = 1;
4089 probe_ent2->legacy_mode = 1;
/* fixed legacy I/O addresses for the primary channel */
4091 probe_ent->port[0].cmd_addr = 0x1f0;
4092 probe_ent->port[0].altstatus_addr =
4093 probe_ent->port[0].ctl_addr = 0x3f6;
4094 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
/* fixed legacy I/O addresses for the secondary channel */
4096 probe_ent2->port[0].cmd_addr = 0x170;
4097 probe_ent2->port[0].altstatus_addr =
4098 probe_ent2->port[0].ctl_addr = 0x376;
4099 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
4101 ata_std_ports(&probe_ent->port[0]);
4102 ata_std_ports(&probe_ent2->port[0]);
4109 * ata_pci_init_one - Initialize/register PCI IDE host controller
4110 * @pdev: Controller to be initialized
4111 * @port_info: Information from low-level host driver
4112 * @n_ports: Number of ports attached to host controller
4114 * This is a helper function which can be called from a driver's
4115 * xxx_init_one() probe function if the hardware uses traditional
4116 * IDE taskfile registers.
4118 * This function calls pci_enable_device(), reserves its register
4119 * regions, sets the dma mask, enables bus master mode, and calls
4123 * Inherited from PCI layer (may sleep).
4126 * Zero on success, negative on errno-based value on error.
4130 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4131 unsigned int n_ports)
4133 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
4134 struct ata_port_info *port[2];
4136 unsigned int legacy_mode = 0;
4137 int disable_dev_on_err = 1;
4142 port[0] = port_info[0];
4144 port[1] = port_info[1];
/* detect legacy mode from the PCI programming-interface byte */
4148 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4149 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4150 /* TODO: support transitioning to native mode? */
4151 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4152 mask = (1 << 2) | (1 << 0);
4153 if ((tmp8 & mask) != mask)
4154 legacy_mode = (1 << 3);
4158 if ((!legacy_mode) && (n_ports > 1)) {
4159 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
4163 rc = pci_enable_device(pdev);
4167 rc = pci_request_regions(pdev, DRV_NAME);
/* regions were never ours; don't disable the device on the error path */
4169 disable_dev_on_err = 0;
/* claim the legacy primary-channel I/O range, tolerating a prior
 * libata claim (ide= kernel parameter interplay) */
4174 if (!request_region(0x1f0, 8, "libata")) {
4175 struct resource *conflict, res;
4177 res.end = 0x1f0 + 8 - 1;
4178 conflict = ____request_resource(&ioport_resource, &res);
4179 if (!strcmp(conflict->name, "libata"))
4180 legacy_mode |= (1 << 0);
4182 disable_dev_on_err = 0;
4183 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4186 legacy_mode |= (1 << 0);
/* same dance for the secondary channel */
4188 if (!request_region(0x170, 8, "libata")) {
4189 struct resource *conflict, res;
4191 res.end = 0x170 + 8 - 1;
4192 conflict = ____request_resource(&ioport_resource, &res);
4193 if (!strcmp(conflict->name, "libata"))
4194 legacy_mode |= (1 << 1);
4196 disable_dev_on_err = 0;
4197 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4200 legacy_mode |= (1 << 1);
4203 /* we have legacy mode, but all ports are unavailable */
4204 if (legacy_mode == (1 << 3)) {
4206 goto err_out_regions;
4209 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4211 goto err_out_regions;
4212 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4214 goto err_out_regions;
4217 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
4219 probe_ent = ata_pci_init_native_mode(pdev, port);
4222 goto err_out_regions;
4225 pci_set_master(pdev);
4227 /* FIXME: check ata_device_add return */
4229 if (legacy_mode & (1 << 0))
4230 ata_device_add(probe_ent);
4231 if (legacy_mode & (1 << 1))
4232 ata_device_add(probe_ent2);
4234 ata_device_add(probe_ent);
/* error unwind: release whatever legacy regions we claimed */
4242 if (legacy_mode & (1 << 0))
4243 release_region(0x1f0, 8);
4244 if (legacy_mode & (1 << 1))
4245 release_region(0x170, 8);
4246 pci_release_regions(pdev);
4248 if (disable_dev_on_err)
4249 pci_disable_device(pdev);
4254 * ata_pci_remove_one - PCI layer callback for device removal
4255 * @pdev: PCI device that was removed
4257 * PCI layer indicates to libata via this hook that
4258 * hot-unplug or module unload event has occurred.
4259 * Handle this by unregistering all objects associated
4260 * with this PCI device. Free those objects. Then finally
4261 * release PCI resources and disable device.
4264 * Inherited from PCI layer (may sleep).
4267 void ata_pci_remove_one (struct pci_dev *pdev)
4269 struct device *dev = pci_dev_to_dev(pdev);
4270 struct ata_host_set *host_set = dev_get_drvdata(dev);
4271 struct ata_port *ap;
/* first pass: detach every port from the SCSI mid-layer */
4274 for (i = 0; i < host_set->n_ports; i++) {
4275 ap = host_set->ports[i];
4277 scsi_remove_host(ap->host);
/* shared irq is safe to free once no port can interrupt us */
4280 free_irq(host_set->irq, host_set);
/* second pass: release per-port resources */
4282 for (i = 0; i < host_set->n_ports; i++) {
4283 ap = host_set->ports[i];
4285 ata_scsi_release(ap->host);
4287 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4288 struct ata_ioports *ioaddr = &ap->ioaddr;
/* give back legacy I/O regions claimed in ata_pci_init_one() */
4290 if (ioaddr->cmd_addr == 0x1f0)
4291 release_region(0x1f0, 8);
4292 else if (ioaddr->cmd_addr == 0x170)
4293 release_region(0x170, 8);
4296 scsi_host_put(ap->host);
4299 if (host_set->ops->host_stop)
4300 host_set->ops->host_stop(host_set);
4304 pci_release_regions(pdev);
4305 pci_disable_device(pdev);
4306 dev_set_drvdata(dev, NULL);
4309 /* move to PCI subsystem */
/* Read a PCI config register of bits->width bits at bits->reg and
 * compare (presumably after masking/shifting not visible in this
 * listing) against bits->val.  Returns 1 on match, 0 otherwise. */
4310 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
4312 unsigned long tmp = 0;
/* NOTE(review): the case labels, tmp8/16/32 declarations and the
 * assignments into tmp are not visible in this listing. */
4314 switch (bits->width) {
4317 pci_read_config_byte(pdev, bits->reg, &tmp8);
4323 pci_read_config_word(pdev, bits->reg, &tmp16);
4329 pci_read_config_dword(pdev, bits->reg, &tmp32);
4340 return (tmp == bits->val) ? 1 : 0;
4342 #endif /* CONFIG_PCI */
4345 static int __init ata_init(void)
4347 ata_wq = create_workqueue("ata");
4351 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4355 static void __exit ata_exit(void)
4357 destroy_workqueue(ata_wq);
4360 module_init(ata_init);
4361 module_exit(ata_exit);
4364 * libata is essentially a library of internal helper functions for
4365 * low-level ATA host controller drivers. As such, the API/ABI is
4366 * likely to change as new drivers are added and updated.
4367 * Do not depend on ABI/API stability.
/* core library entry points: probe/registration, SG setup,
 * command issue/completion, timeout handling */
4370 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4371 EXPORT_SYMBOL_GPL(ata_std_ports);
4372 EXPORT_SYMBOL_GPL(ata_device_add);
4373 EXPORT_SYMBOL_GPL(ata_sg_init);
4374 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4375 EXPORT_SYMBOL_GPL(ata_qc_complete);
4376 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4377 EXPORT_SYMBOL_GPL(ata_eng_timeout);
/* taskfile and ATA register-block access helpers */
4378 EXPORT_SYMBOL_GPL(ata_tf_load);
4379 EXPORT_SYMBOL_GPL(ata_tf_read);
4380 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4381 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4382 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4383 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4384 EXPORT_SYMBOL_GPL(ata_check_status);
4385 EXPORT_SYMBOL_GPL(ata_altstatus);
4386 EXPORT_SYMBOL_GPL(ata_chk_err);
4387 EXPORT_SYMBOL_GPL(ata_exec_command);
/* port lifecycle and interrupt handling */
4388 EXPORT_SYMBOL_GPL(ata_port_start);
4389 EXPORT_SYMBOL_GPL(ata_port_stop);
4390 EXPORT_SYMBOL_GPL(ata_host_stop);
4391 EXPORT_SYMBOL_GPL(ata_interrupt);
/* PCI bus-master DMA (BMDMA) helpers */
4392 EXPORT_SYMBOL_GPL(ata_qc_prep);
4393 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4394 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4395 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4396 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4397 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
/* probing, PHY/bus reset and device classification */
4398 EXPORT_SYMBOL_GPL(ata_port_probe);
4399 EXPORT_SYMBOL_GPL(sata_phy_reset);
4400 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4401 EXPORT_SYMBOL_GPL(ata_bus_reset);
4402 EXPORT_SYMBOL_GPL(ata_port_disable);
/* SCSI midlayer glue (implemented in libata-scsi.c) */
4403 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4404 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4405 EXPORT_SYMBOL_GPL(ata_scsi_error);
4406 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4407 EXPORT_SYMBOL_GPL(ata_scsi_release);
4408 EXPORT_SYMBOL_GPL(ata_host_intr);
4409 EXPORT_SYMBOL_GPL(ata_dev_classify);
4410 EXPORT_SYMBOL_GPL(ata_dev_id_string);
4411 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
/* PCI-specific helpers, only built with CONFIG_PCI */
4414 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4415 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4416 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4417 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4418 #endif /* CONFIG_PCI */