2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_dev_xfermask(struct ata_port *ap,
69 struct ata_device *dev);
71 static unsigned int ata_unique_id = 1;
72 static struct workqueue_struct *ata_wq;
74 int atapi_enabled = 1;
75 module_param(atapi_enabled, int, 0444);
76 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
79 module_param_named(fua, libata_fua, int, 0444);
80 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
82 MODULE_AUTHOR("Jeff Garzik");
83 MODULE_DESCRIPTION("Library module for ATA devices");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_VERSION);
89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
90 * @tf: Taskfile to convert
91 * @fis: Buffer into which data will output
92 * @pmp: Port multiplier port
94 * Converts a standard ATA taskfile to a Serial ATA
95 * FIS structure (Register - Host to Device).
98 * Inherited from caller.
/* Serialize an ATA taskfile into a 20-byte Register - Host to Device FIS.
 * NOTE(review): this extraction is missing lines (fis[4..7], fis[12],
 * fis[14..19], braces) -- verify against upstream libata-core.c. */
101 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
103 fis[0] = 0x27; /* Register - Host to Device FIS */
104 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
105 bit 7 indicates Command FIS */
106 fis[2] = tf->command;
107 fis[3] = tf->feature;
/* bytes 8-13: high-order (LBA48 "hob") register values */
114 fis[8] = tf->hob_lbal;
115 fis[9] = tf->hob_lbam;
116 fis[10] = tf->hob_lbah;
117 fis[11] = tf->hob_feature;
120 fis[13] = tf->hob_nsect;
131 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
132 * @fis: Buffer from which data will be input
133 * @tf: Taskfile to output
135 * Converts a serial ATA FIS structure to a standard ATA taskfile.
138 * Inherited from caller.
/* Inverse of ata_tf_to_fis(): unpack a D2H Register FIS into @tf.
 * In a D2H FIS byte 2 carries Status and byte 3 carries Error, which map
 * onto the command/feature taskfile slots respectively.
 * NOTE(review): extraction is missing the low-order LBA/nsect/device
 * bytes (fis[4..7], fis[12]) -- verify against upstream. */
141 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
143 tf->command = fis[2]; /* status */
144 tf->feature = fis[3]; /* error */
/* high-order (LBA48) register values */
151 tf->hob_lbal = fis[8];
152 tf->hob_lbam = fis[9];
153 tf->hob_lbah = fis[10];
156 tf->hob_nsect = fis[13];
159 static const u8 ata_rw_cmds[] = {
163 ATA_CMD_READ_MULTI_EXT,
164 ATA_CMD_WRITE_MULTI_EXT,
168 ATA_CMD_WRITE_MULTI_FUA_EXT,
172 ATA_CMD_PIO_READ_EXT,
173 ATA_CMD_PIO_WRITE_EXT,
186 ATA_CMD_WRITE_FUA_EXT
190 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
191 * @qc: command to examine and configure
193 * Examine the device configuration and tf->flags to calculate
194 * the proper read/write commands and protocol to use.
/* Choose the taskfile r/w command opcode and transfer protocol for @qc.
 * fua/lba48/write are folded into an index into ata_rw_cmds[]; PIO-only
 * devices (and hosts that cannot DMA LBA48) select the PIO half of the
 * table (base index 0 for multi-sector, 8 for single-sector commands). */
199 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
201 struct ata_taskfile *tf = &qc->tf;
202 struct ata_device *dev = qc->dev;
205 int index, fua, lba48, write;
/* each flag contributes a distinct offset into ata_rw_cmds[] */
207 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
208 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
209 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
211 if (dev->flags & ATA_DFLAG_PIO) {
212 tf->protocol = ATA_PROT_PIO;
213 index = dev->multi_count ? 0 : 8;
214 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
215 /* Unable to use DMA due to host limitation */
216 tf->protocol = ATA_PROT_PIO;
217 index = dev->multi_count ? 0 : 8;
/* NOTE(review): the else branch setting the DMA index appears truncated
 * in this extraction -- verify against upstream. */
219 tf->protocol = ATA_PROT_DMA;
223 cmd = ata_rw_cmds[index + fua + lba48 + write];
232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
233 * @pio_mask: pio_mask
234 * @mwdma_mask: mwdma_mask
235 * @udma_mask: udma_mask
237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
238 * unsigned int xfer_mask.
/* Pack the three per-class transfer-mode masks into one xfer_mask word,
 * each shifted into its ATA_SHIFT_* field and clipped to its ATA_MASK_*. */
246 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
255 static const struct ata_xfer_ent {
256 unsigned int shift, bits;
259 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
260 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
261 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
266 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
267 * @xfer_mask: xfer_mask of interest
269 * Return matching XFER_* value for @xfer_mask. Only the highest
270 * bit of @xfer_mask is considered.
276 * Matching XFER_* value, 0 if no match found.
/* Map the highest set bit of @xfer_mask to its XFER_* mode value by
 * scanning ata_xfer_tbl for the field containing that bit.
 * NOTE(review): the fallthrough "return 0" appears missing from this
 * extraction -- verify against upstream. */
278 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
280 int highbit = fls(xfer_mask) - 1; /* -1 when mask is empty */
281 const struct ata_xfer_ent *ent;
283 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
284 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
285 return ent->base + highbit - ent->shift;
290 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
291 * @xfer_mode: XFER_* of interest
293 * Return matching xfer_mask for @xfer_mode.
299 * Matching xfer_mask, 0 if no match found.
/* Inverse of ata_xfer_mask2mode(): map an XFER_* mode to the single-bit
 * xfer_mask representing it, via the table's base/shift/bits fields. */
301 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
303 const struct ata_xfer_ent *ent;
305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
306 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
307 return 1 << (ent->shift + xfer_mode - ent->base);
312 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
313 * @xfer_mode: XFER_* of interest
315 * Return matching xfer_shift for @xfer_mode.
321 * Matching xfer_shift, -1 if no match found.
/* Map an XFER_* mode to its xfer_mask field shift (ATA_SHIFT_*).
 * NOTE(review): the "return ent->shift" body of the if and the -1
 * fallthrough are missing from this extraction -- verify upstream. */
323 static int ata_xfer_mode2shift(unsigned int xfer_mode)
325 const struct ata_xfer_ent *ent;
327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
334 * ata_mode_string - convert xfer_mask to string
335 * @xfer_mask: mask of bits supported; only highest bit counts.
337 * Determine string which represents the highest speed
338 * (highest bit in @modemask).
344 * Constant C string representing highest speed listed in
345 * @mode_mask, or the constant C string "<n/a>".
/* Return the name of the fastest mode in @xfer_mask, indexed by its
 * highest set bit; NOTE(review): the table contents and the "<n/a>"
 * fallback return are missing from this extraction. */
347 static const char *ata_mode_string(unsigned int xfer_mask)
349 static const char * const xfer_mode_str[] = {
369 highbit = fls(xfer_mask) - 1;
370 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
371 return xfer_mode_str[highbit];
376 * ata_pio_devchk - PATA device presence detection
377 * @ap: ATA channel to examine
378 * @device: Device to examine (starting at zero)
380 * This technique was originally described in
381 * Hale Landis's ATADRVR (www.ata-atapi.com), and
382 * later found its way into the ATA/ATAPI spec.
384 * Write a pattern to the ATA shadow registers,
385 * and if a device is present, it will respond by
386 * correctly storing and echoing back the
387 * ATA shadow register contents.
/* PIO-space device presence probe (Hale Landis technique): write
 * complementary patterns to the nsect/lbal shadow registers and see if
 * a device echoes the final pair back. Returns 1 if present, 0 if not. */
393 static unsigned int ata_pio_devchk(struct ata_port *ap,
396 struct ata_ioports *ioaddr = &ap->ioaddr;
399 ap->ops->dev_select(ap, device);
401 outb(0x55, ioaddr->nsect_addr);
402 outb(0xaa, ioaddr->lbal_addr);
404 outb(0xaa, ioaddr->nsect_addr);
405 outb(0x55, ioaddr->lbal_addr);
/* final pattern: 0x55/0xaa, which a real device must store and echo */
407 outb(0x55, ioaddr->nsect_addr);
408 outb(0xaa, ioaddr->lbal_addr);
410 nsect = inb(ioaddr->nsect_addr);
411 lbal = inb(ioaddr->lbal_addr);
413 if ((nsect == 0x55) && (lbal == 0xaa))
414 return 1; /* we found a device */
416 return 0; /* nothing found */
420 * ata_mmio_devchk - PATA device presence detection
421 * @ap: ATA channel to examine
422 * @device: Device to examine (starting at zero)
424 * This technique was originally described in
425 * Hale Landis's ATADRVR (www.ata-atapi.com), and
426 * later found its way into the ATA/ATAPI spec.
428 * Write a pattern to the ATA shadow registers,
429 * and if a device is present, it will respond by
430 * correctly storing and echoing back the
431 * ATA shadow register contents.
/* MMIO twin of ata_pio_devchk(): identical pattern-echo presence probe
 * using writeb/readb on memory-mapped shadow registers. */
437 static unsigned int ata_mmio_devchk(struct ata_port *ap,
440 struct ata_ioports *ioaddr = &ap->ioaddr;
443 ap->ops->dev_select(ap, device);
445 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
446 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
448 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
449 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
/* final pattern must be stored and echoed by a present device */
451 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
452 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
454 nsect = readb((void __iomem *) ioaddr->nsect_addr);
455 lbal = readb((void __iomem *) ioaddr->lbal_addr);
457 if ((nsect == 0x55) && (lbal == 0xaa))
458 return 1; /* we found a device */
460 return 0; /* nothing found */
464 * ata_devchk - PATA device presence detection
465 * @ap: ATA channel to examine
466 * @device: Device to examine (starting at zero)
468 * Dispatch ATA device presence detection, depending
469 * on whether we are using PIO or MMIO to talk to the
470 * ATA shadow registers.
/* Dispatch presence detection to the MMIO or PIO variant based on the
 * port's ATA_FLAG_MMIO flag. */
476 static unsigned int ata_devchk(struct ata_port *ap,
479 if (ap->flags & ATA_FLAG_MMIO)
480 return ata_mmio_devchk(ap, device);
481 return ata_pio_devchk(ap, device);
485 * ata_dev_classify - determine device type based on ATA-spec signature
486 * @tf: ATA taskfile register set for device to be identified
488 * Determine from taskfile register contents whether a device is
489 * ATA or ATAPI, as per "Signature and persistence" section
490 * of ATA/PI spec (volume 1, sect 5.14).
496 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
497 * the event of failure.
/* Classify a device from its post-reset taskfile signature using only
 * the LBA mid/high bytes: 0/0 or 3c/c3 => ATA, 14/eb or 69/96 => ATAPI,
 * anything else => unknown. */
500 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
502 /* Apple's open source Darwin code hints that some devices only
503 * put a proper signature into the LBA mid/high registers,
504 * So, we only check those. It's sufficient for uniqueness.
507 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
508 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
509 DPRINTK("found ATA device by sig\n");
/* NOTE(review): the "return ATA_DEV_ATA" line is missing from this
 * extraction -- verify against upstream. */
513 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
514 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
515 DPRINTK("found ATAPI device by sig\n");
516 return ATA_DEV_ATAPI;
519 DPRINTK("unknown device\n");
520 return ATA_DEV_UNKNOWN;
524 * ata_dev_try_classify - Parse returned ATA device signature
525 * @ap: ATA channel to examine
526 * @device: Device to examine (starting at zero)
527 * @r_err: Value of error register on completion
529 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
530 * an ATA/ATAPI-defined set of values is placed in the ATA
531 * shadow registers, indicating the results of device detection
534 * Select the ATA device, and read the values from the ATA shadow
535 * registers. Then parse according to the Error register value,
536 * and the spec-defined values examined by ata_dev_classify().
542 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
/* After a reset event: select the device, read back its shadow
 * registers, check the diagnostic Error value, then hand the taskfile
 * to ata_dev_classify(). An ATA class with Status==0 is rejected.
 * NOTE(review): several lines (err read, ATA_DEV_NONE returns, *r_err
 * store) are missing from this extraction -- verify upstream. */
546 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
548 struct ata_taskfile tf;
552 ap->ops->dev_select(ap, device);
554 memset(&tf, 0, sizeof(tf));
556 ap->ops->tf_read(ap, &tf);
561 /* see if device passed diags */
/* 0x81 on device 0 means device 0 OK, device 1 failed diagnostics */
564 else if ((device == 0) && (err == 0x81))
569 /* determine if device is ATA or ATAPI */
570 class = ata_dev_classify(&tf);
572 if (class == ATA_DEV_UNKNOWN)
/* a claimed ATA device with an all-zero Status register isn't real */
574 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
580 * ata_id_string - Convert IDENTIFY DEVICE page into string
581 * @id: IDENTIFY DEVICE results we will examine
582 * @s: string into which data is output
583 * @ofs: offset into identify device page
584 * @len: length of string to return. must be an even number.
586 * The strings in the IDENTIFY DEVICE page are broken up into
587 * 16-bit chunks. Run through the string, and output each
588 * 8-bit chunk linearly, regardless of platform.
594 void ata_id_string(const u16 *id, unsigned char *s,
595 unsigned int ofs, unsigned int len)
614 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
618 * @len: length of string to return. must be an odd number.
620 * This function is identical to ata_id_string except that it
621 * trims trailing spaces and terminates the resulting string with
622 * null. @len must be actual maximum length (even number) + 1.
/* ata_id_string() plus C-string post-processing: copy len-1 bytes, then
 * trim trailing spaces and NUL-terminate (terminator write is in a line
 * missing from this extraction). @len must be the even max length + 1. */
627 void ata_id_c_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
634 ata_id_string(id, s, ofs, len - 1);
/* walk back over the space padding used by IDENTIFY strings */
636 p = s + strnlen(s, len - 1);
637 while (p > s && p[-1] == ' ')
/* Capacity in sectors from IDENTIFY data: LBA48 count (words 100-103),
 * else LBA28 count (words 60-61), else current CHS product (words
 * 57-58), else default CHS geometry product (cyl * heads * sectors). */
642 static u64 ata_id_n_sectors(const u16 *id)
644 if (ata_id_has_lba(id)) {
645 if (ata_id_has_lba48(id))
646 return ata_id_u64(id, 100);
648 return ata_id_u32(id, 60);
650 if (ata_id_current_chs_valid(id))
651 return ata_id_u32(id, 57);
/* default geometry: cylinders * heads * sectors/track */
653 return id[1] * id[3] * id[6];
658 * ata_noop_dev_select - Select device 0/1 on ATA bus
659 * @ap: ATA channel to manipulate
660 * @device: ATA device (numbered from zero) to select
662 * This function performs no actual function.
664 * May be used as the dev_select() entry in ata_port_operations.
669 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
675 * ata_std_dev_select - Select device 0/1 on ATA bus
676 * @ap: ATA channel to manipulate
677 * @device: ATA device (numbered from zero) to select
679 * Use the method defined in the ATA specification to
680 * make either device 0, or device 1, active on the
681 * ATA channel. Works with both PIO and MMIO.
683 * May be used as the dev_select() entry in ata_port_operations.
/* Write the Device register to make device 0 or 1 active, via MMIO or
 * port I/O as the port requires, then pause (which also flushes the
 * posted MMIO write). */
689 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
694 tmp = ATA_DEVICE_OBS;
696 tmp = ATA_DEVICE_OBS | ATA_DEV1; /* DEV bit set selects device 1 */
698 if (ap->flags & ATA_FLAG_MMIO) {
699 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
701 outb(tmp, ap->ioaddr.device_addr);
703 ata_pause(ap); /* needed; also flushes, for mmio */
707 * ata_dev_select - Select device 0/1 on ATA bus
708 * @ap: ATA channel to manipulate
709 * @device: ATA device (numbered from zero) to select
710 * @wait: non-zero to wait for Status register BSY bit to clear
711 * @can_sleep: non-zero if context allows sleeping
713 * Use the method defined in the ATA specification to
714 * make either device 0, or device 1, active on the
717 * This is a high-level version of ata_std_dev_select(),
718 * which additionally provides the services of inserting
719 * the proper pauses and status polling, where needed.
/* High-level device select: delegates to ops->dev_select() and, when the
 * context may sleep and the target is ATAPI, inserts extra settle time
 * (the delay call itself is in a line missing from this extraction). */
725 void ata_dev_select(struct ata_port *ap, unsigned int device,
726 unsigned int wait, unsigned int can_sleep)
728 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
729 ap->id, device, wait);
734 ap->ops->dev_select(ap, device);
737 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
744 * ata_dump_id - IDENTIFY DEVICE info debugging output
745 * @id: IDENTIFY DEVICE page to dump
747 * Dump selected 16-bit words from the given IDENTIFY DEVICE
754 static inline void ata_dump_id(const u16 *id)
756 DPRINTK("49==0x%04x "
766 DPRINTK("80==0x%04x "
776 DPRINTK("88==0x%04x "
783 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
784 * @id: IDENTIFY data to compute xfer mask from
786 * Compute the xfermask for this device. This is not as trivial
787 * as it seems if we must consider early devices correctly.
789 * FIXME: pre IDE drive timing (do we care ?).
/* Derive the packed xfer_mask from IDENTIFY data, handling pre-EIDE
 * drives whose word 64 PIO field is invalid (word 53 bit 1 clear). */
797 static unsigned int ata_id_xfermask(const u16 *id)
799 unsigned int pio_mask, mwdma_mask, udma_mask;
801 /* Usual case. Word 53 indicates word 64 is valid */
802 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
803 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
807 /* If word 64 isn't valid then Word 51 high byte holds
808 * the PIO timing number for the maximum. Turn it into
811 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
813 /* But wait.. there's more. Design your standards by
814 * committee and you too can get a free iordy field to
815 * process. However its the speeds not the modes that
816 * are supported... Note drivers using the timing API
817 * will get this right anyway
821 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
/* UDMA modes are only valid when word 53 bit 2 is set */
824 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
825 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
827 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
831 * ata_port_queue_task - Queue port_task
832 * @ap: The ata_port to queue port_task for
834 * Schedule @fn(@data) for execution after @delay jiffies using
835 * port_task. There is one port_task per port and it's the
836 * user(low level driver)'s responsibility to make sure that only
837 * one task is active at any given time.
839 * libata core layer takes care of synchronization between
840 * port_task and EH. ata_port_queue_task() may be ignored for EH
844 * Inherited from caller.
/* Schedule fn(data) on the port's single port_task work item, either
 * immediately or after @delay jiffies. Skipped entirely while the port
 * is being flushed (ATA_FLAG_FLUSH_PORT_TASK). */
846 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
851 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
854 PREPARE_WORK(&ap->port_task, fn, data);
857 rc = queue_work(ata_wq, &ap->port_task);
859 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
861 /* rc == 0 means that another user is using port task */
866 * ata_port_flush_task - Flush port_task
867 * @ap: The ata_port to flush port_task for
869 * After this function completes, port_task is guranteed not to
870 * be running or scheduled.
873 * Kernel thread context (may sleep)
/* Guarantee port_task is neither running nor queued on return: raise
 * the FLUSH flag under the host_set lock (stops requeueing), flush the
 * workqueue, cancel any still-delayed work (flushing again if the
 * cancel lost the race), then clear the flag. */
875 void ata_port_flush_task(struct ata_port *ap)
881 spin_lock_irqsave(&ap->host_set->lock, flags);
882 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
883 spin_unlock_irqrestore(&ap->host_set->lock, flags);
885 DPRINTK("flush #1\n");
886 flush_workqueue(ata_wq);
889 * At this point, if a task is running, it's guaranteed to see
890 * the FLUSH flag; thus, it will never queue pio tasks again.
/* cancel_delayed_work() failing means the work already started; flush
 * once more so it has fully drained */
893 if (!cancel_delayed_work(&ap->port_task)) {
894 DPRINTK("flush #2\n");
895 flush_workqueue(ata_wq);
898 spin_lock_irqsave(&ap->host_set->lock, flags);
899 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
900 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* Completion callback for ata_exec_internal(): snapshot the result
 * taskfile into qc->tf, then signal the waiter stored in private_data
 * (the complete() call is in a line missing from this extraction). */
905 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
907 struct completion *waiting = qc->private_data;
909 qc->ap->ops->tf_read(qc->ap, &qc->tf);
914 * ata_exec_internal - execute libata internal command
915 * @ap: Port to which the command is sent
916 * @dev: Device to which the command is sent
917 * @tf: Taskfile registers for the command and the result
918 * @dma_dir: Data tranfer direction of the command
919 * @buf: Data buffer of the command
920 * @buflen: Length of data buffer
922 * Executes libata internal command with timeout. @tf contains
923 * command on entry and result on return. Timeout and error
924 * conditions are reported via return value. No recovery action
925 * is taken after a command times out. It's caller's duty to
926 * clean up after timeout.
929 * None. Should be called with kernel context, might sleep.
/* Synchronously execute a libata-internal command: build a qc under the
 * host_set lock, issue it, then wait (with ATA_TMOUT_INTERNAL timeout)
 * on a completion signalled by ata_qc_complete_internal(). On timeout
 * the qc is force-failed with AC_ERR_TIMEOUT; the caller must clean up.
 * Returns the command's err_mask. */
933 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
934 struct ata_taskfile *tf,
935 int dma_dir, void *buf, unsigned int buflen)
937 u8 command = tf->command;
938 struct ata_queued_cmd *qc;
939 DECLARE_COMPLETION(wait);
941 unsigned int err_mask;
943 spin_lock_irqsave(&ap->host_set->lock, flags);
945 qc = ata_qc_new_init(ap, dev);
949 qc->dma_dir = dma_dir;
950 if (dma_dir != DMA_NONE) {
951 ata_sg_init_one(qc, buf, buflen);
952 qc->nsect = buflen / ATA_SECT_SIZE;
955 qc->private_data = &wait;
956 qc->complete_fn = ata_qc_complete_internal;
958 qc->err_mask = ata_qc_issue(qc);
962 spin_unlock_irqrestore(&ap->host_set->lock, flags);
964 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
965 ata_port_flush_task(ap);
967 spin_lock_irqsave(&ap->host_set->lock, flags);
969 /* We're racing with irq here. If we lose, the
970 * following test prevents us from completing the qc
971 * again. If completion irq occurs after here but
972 * before the caller cleans up, it will result in a
973 * spurious interrupt. We can live with that.
975 if (qc->flags & ATA_QCFLAG_ACTIVE) {
976 qc->err_mask = AC_ERR_TIMEOUT;
978 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
982 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* NOTE(review): qc free / return path lines are missing from this
 * extraction -- verify against upstream. */
986 err_mask = qc->err_mask;
994 * ata_pio_need_iordy - check if iordy needed
997 * Check if the current speed of the device requires IORDY. Used
998 * by various controllers for chip configuration.
/* Decide whether the device's current PIO speed requires IORDY flow
 * control. For EIDE drives (word 53 bit 1), compare the advertised
 * non-IORDY minimum cycle time against the PIO2 240ns cycle. */
1001 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1004 int speed = adev->pio_mode - XFER_PIO_0;
1011 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1013 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1014 pio = adev->id[ATA_ID_EIDE_PIO];
1015 /* Is the speed faster than the drive allows non IORDY ? */
1017 /* This is cycle times not frequency - watch the logic! */
1018 if (pio > 240) /* PIO2 is 240nS per cycle */
1027 * ata_dev_read_id - Read ID data from the specified device
1028 * @ap: port on which target device resides
1029 * @dev: target device
1030 * @p_class: pointer to class of the target device (may be changed)
1031 * @post_reset: is this read ID post-reset?
1032 * @p_id: read IDENTIFY page (newly allocated)
1034 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1035 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1036 * devices. This function also takes care of EDD signature
1037 * misreporting (to be removed once EDD support is gone) and
1038 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1041 * Kernel thread context (may sleep)
1044 * 0 on success, -errno otherwise.
/* Read the IDENTIFY page for @dev into a newly kmalloc'd buffer (*p_id),
 * possibly updating *p_class. Handles the EDD ATA/ATAPI signature
 * mixup (retries as IDENTIFY PACKET DEVICE on ATA_ABORTED) and issues
 * INIT DEVICE PARAMS for pre-ATA4 CHS drives after reset. Returns 0 on
 * success, -errno otherwise; failures are logged with a reason string. */
1046 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1047 unsigned int *p_class, int post_reset, u16 **p_id)
1049 unsigned int class = *p_class;
1050 unsigned int using_edd;
1051 struct ata_taskfile tf;
1052 unsigned int err_mask = 0;
1057 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
/* ports with probe_reset or SRST/SATA_RESET are not using EDD */
1059 if (ap->ops->probe_reset ||
1060 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1065 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1067 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1070 reason = "out of memory";
1075 ata_tf_init(ap, &tf, dev->devno);
/* pick the IDENTIFY flavor for the expected device class */
1079 tf.command = ATA_CMD_ID_ATA;
1082 tf.command = ATA_CMD_ID_ATAPI;
1086 reason = "unsupported class";
1090 tf.protocol = ATA_PROT_PIO;
1092 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1093 id, sizeof(id[0]) * ATA_ID_WORDS);
1097 reason = "I/O error";
1099 if (err_mask & ~AC_ERR_DEV)
1103 * arg! EDD works for all test cases, but seems to return
1104 * the ATA signature for some ATAPI devices. Until the
1105 * reason for this is found and fixed, we fix up the mess
1106 * here. If IDENTIFY DEVICE returns command aborted
1107 * (as ATAPI devices do), then we issue an
1108 * IDENTIFY PACKET DEVICE.
1110 * ATA software reset (SRST, the default) does not appear
1111 * to have this problem.
1113 if ((using_edd) && (class == ATA_DEV_ATA)) {
1114 u8 err = tf.feature;
1115 if (err & ATA_ABORTED) {
1116 class = ATA_DEV_ATAPI;
/* IDENTIFY data is little-endian on the wire; fix up for this CPU */
1123 swap_buf_le16(id, ATA_ID_WORDS);
1126 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1128 reason = "device reports illegal type";
1132 if (post_reset && class == ATA_DEV_ATA) {
1134 * The exact sequence expected by certain pre-ATA4 drives is:
1137 * INITIALIZE DEVICE PARAMETERS
1139 * Some drives were very specific about that exact sequence.
1141 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1142 err_mask = ata_dev_init_params(ap, dev);
1145 reason = "INIT_DEV_PARAMS failed";
1149 /* current CHS translation info (id[53-58]) might be
1150 * changed. reread the identify device info.
1162 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1163 ap->id, dev->devno, reason);
/* True when the device sits behind a SATA bridge: the cable is SATA but
 * the device's IDENTIFY data does not report native SATA. Such devices
 * get bridge limits applied in ata_dev_configure(). */
1168 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1169 struct ata_device *dev)
1171 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1175 * ata_dev_configure - Configure the specified ATA/ATAPI device
1176 * @ap: Port on which target device resides
1177 * @dev: Target device to configure
1178 * @print_info: Enable device info printout
1180 * Configure @dev according to @dev->id. Generic and low-level
1181 * driver specific fixups are also applied.
1184 * Kernel thread context (may sleep)
1187 * 0 on success, -errno otherwise
/* Configure @dev from its IDENTIFY data (dev->id): capacity and LBA/
 * LBA48 vs. CHS geometry for ATA, CDB length for ATAPI, the host's
 * max_cmd_len, SATA-bridge limits, and finally the LLD's dev_config
 * hook. @print_info enables the dmesg summary. Returns 0 on success. */
1189 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1192 const u16 *id = dev->id;
1193 unsigned int xfer_mask;
1196 if (!ata_dev_present(dev)) {
1197 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1198 ap->id, dev->devno);
1202 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1204 /* print device capabilities */
1206 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1207 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1208 ap->id, dev->devno, id[49], id[82], id[83],
1209 id[84], id[85], id[86], id[87], id[88]);
1211 /* initialize to-be-configured parameters */
1213 dev->max_sectors = 0;
1221 * common ATA, ATAPI feature tests
1224 /* find max transfer mode; for printk only */
1225 xfer_mask = ata_id_xfermask(id);
1229 /* ATA-specific feature tests */
1230 if (dev->class == ATA_DEV_ATA) {
1231 dev->n_sectors = ata_id_n_sectors(id);
1233 if (ata_id_has_lba(id)) {
1234 const char *lba_desc;
1237 dev->flags |= ATA_DFLAG_LBA;
1238 if (ata_id_has_lba48(id)) {
1239 dev->flags |= ATA_DFLAG_LBA48;
1243 /* print device info to dmesg */
1245 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1246 "max %s, %Lu sectors: %s\n",
1248 ata_id_major_version(id),
1249 ata_mode_string(xfer_mask),
1250 (unsigned long long)dev->n_sectors,
1255 /* Default translation */
1256 dev->cylinders = id[1];
1258 dev->sectors = id[6];
1260 if (ata_id_current_chs_valid(id)) {
1261 /* Current CHS translation is valid. */
1262 dev->cylinders = id[54];
1263 dev->heads = id[55];
1264 dev->sectors = id[56];
1267 /* print device info to dmesg */
1269 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1270 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1272 ata_id_major_version(id),
1273 ata_mode_string(xfer_mask),
1274 (unsigned long long)dev->n_sectors,
1275 dev->cylinders, dev->heads, dev->sectors);
1281 /* ATAPI-specific feature tests */
1282 else if (dev->class == ATA_DEV_ATAPI) {
1283 rc = atapi_cdb_len(id);
1284 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1285 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1289 dev->cdb_len = (unsigned int) rc;
1291 /* print device info to dmesg */
1293 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1294 ap->id, dev->devno, ata_mode_string(xfer_mask));
/* host-wide max CDB length is the max over all devices on the port */
1297 ap->host->max_cmd_len = 0;
1298 for (i = 0; i < ATA_MAX_DEVICES; i++)
1299 ap->host->max_cmd_len = max_t(unsigned int,
1300 ap->host->max_cmd_len,
1301 ap->device[i].cdb_len);
1303 /* limit bridge transfers to udma5, 200 sectors */
1304 if (ata_dev_knobble(ap, dev)) {
1306 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1307 ap->id, dev->devno);
1308 ap->udma_mask &= ATA_UDMA5;
1309 dev->max_sectors = ATA_MAX_SECTORS;
1312 if (ap->ops->dev_config)
1313 ap->ops->dev_config(ap, dev);
1315 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
/* NOTE(review): the error-label lines between success and this failure
 * path are missing from this extraction -- verify upstream. */
1319 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1320 ap->id, dev->devno);
1321 DPRINTK("EXIT, err\n");
1326 * ata_bus_probe - Reset and probe ATA bus
1329 * Master ATA bus probing function. Initiates a hardware-dependent
1330 * bus reset, then attempts to identify any devices found on
1334 * PCI/etc. bus probe sem.
1337 * Zero on success, non-zero on error.
/* Master bus probe: reset the port (probe_reset hook if available,
 * else legacy phy_reset), resolve device classes, then read IDENTIFY
 * and configure each present device. Devices that fail are disabled;
 * the whole port is disabled when nothing usable is found. */
1340 static int ata_bus_probe(struct ata_port *ap)
1342 unsigned int classes[ATA_MAX_DEVICES];
1343 unsigned int i, rc, found = 0;
1347 /* reset and determine device classes */
1348 for (i = 0; i < ATA_MAX_DEVICES; i++)
1349 classes[i] = ATA_DEV_UNKNOWN;
1351 if (ap->ops->probe_reset) {
1352 rc = ap->ops->probe_reset(ap, classes);
1354 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
/* legacy path: phy_reset fills ap->device[].class itself */
1358 ap->ops->phy_reset(ap);
1360 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1361 for (i = 0; i < ATA_MAX_DEVICES; i++)
1362 classes[i] = ap->device[i].class;
/* anything still unknown after reset is treated as absent */
1367 for (i = 0; i < ATA_MAX_DEVICES; i++)
1368 if (classes[i] == ATA_DEV_UNKNOWN)
1369 classes[i] = ATA_DEV_NONE;
1371 /* read IDENTIFY page and configure devices */
1372 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1373 struct ata_device *dev = &ap->device[i];
1375 dev->class = classes[i];
1377 if (!ata_dev_present(dev))
1380 WARN_ON(dev->id != NULL);
1381 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1382 dev->class = ATA_DEV_NONE;
1386 if (ata_dev_configure(ap, dev, 1)) {
1387 dev->class++; /* disable device */
1395 goto err_out_disable;
1398 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1399 goto err_out_disable;
/* NOTE(review): the success return and err_out_disable label lines are
 * missing from this extraction -- verify upstream. */
1404 ap->ops->port_disable(ap);
1409 * ata_port_probe - Mark port as enabled
1410 * @ap: Port for which we indicate enablement
1412 * Modify @ap data structure such that the system
1413 * thinks that the entire port is enabled.
1415 * LOCKING: host_set lock, or some other form of
1419 void ata_port_probe(struct ata_port *ap)
1421 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1425 * sata_print_link_status - Print SATA link status
1426 * @ap: SATA port to printk link status about
1428 * This function prints link speed and status of a SATA link.
/* Log SATA link state: read SStatus via the port's scr_read hook (bail
 * out if the port has none) and print negotiated speed for a live link,
 * or a link-down message otherwise. SStatus bits 7:4 encode speed. */
1433 static void sata_print_link_status(struct ata_port *ap)
1438 if (!ap->ops->scr_read)
1441 sstatus = scr_read(ap, SCR_STATUS);
1443 if (sata_dev_present(ap)) {
1444 tmp = (sstatus >> 4) & 0xf; /* SPD field */
1447 else if (tmp & (1 << 1))
1450 speed = "<unknown>";
1451 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1452 ap->id, speed, sstatus);
1454 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1460 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1461 * @ap: SATA port associated with target SATA PHY.
1463 * This function issues commands to standard SATA Sxxx
1464 * PHY registers, to wake up the phy (and device), and
1465 * clear any reset condition.
1468 * PCI/etc. bus probe sem.
/* Low-level SATA PHY wake/reset: pulse DET in SControl (0x301 then
 * 0x300), poll SStatus up to 5 seconds for the PHY to leave state 1,
 * log the link, then either mark the cable SATA or disable the port if
 * no device answered or BSY never cleared. */
1471 void __sata_phy_reset(struct ata_port *ap)
1474 unsigned long timeout = jiffies + (HZ * 5);
1476 if (ap->flags & ATA_FLAG_SATA_RESET) {
1477 /* issue phy wake/reset */
1478 scr_write_flush(ap, SCR_CONTROL, 0x301);
1479 /* Couldn't find anything in SATA I/II specs, but
1480 * AHCI-1.1 10.4.2 says at least 1 ms. */
1483 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1485 /* wait for phy to become ready, if necessary */
1488 sstatus = scr_read(ap, SCR_STATUS);
1489 if ((sstatus & 0xf) != 1)
1491 } while (time_before(jiffies, timeout));
1493 /* print link status */
1494 sata_print_link_status(ap);
1496 /* TODO: phy layer with polling, timeouts, etc. */
1497 if (sata_dev_present(ap))
1500 ata_port_disable(ap);
1502 if (ap->flags & ATA_FLAG_PORT_DISABLED)
/* wait for the device to drop BSY; give up and disable on timeout */
1505 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1506 ata_port_disable(ap);
1510 ap->cbl = ATA_CBL_SATA;
1514 * sata_phy_reset - Reset SATA bus.
1515 * @ap: SATA port associated with target SATA PHY.
1517 * This function resets the SATA bus, and then probes
1518 * the bus for devices.
1521 * PCI/etc. bus probe sem.
void sata_phy_reset(struct ata_port *ap)
	/* wake/reset the phy; bail out if that disabled the port */
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
1533 * ata_port_disable - Disable port.
1534 * @ap: Port to be disabled.
1536 * Modify @ap data structure such that the system
1537 * thinks that the entire port is disabled, and should
1538 * never attempt to probe or communicate with devices
1541 * LOCKING: host_set lock, or some other form of
void ata_port_disable(struct ata_port *ap)
	/* mark both possible devices (master/slave) as absent ... */
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	/* ... and flag the whole port so probe paths skip it */
	ap->flags |= ATA_FLAG_PORT_DISABLED;
1553 * This mode timing computation functionality is ported over from
1554 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1557 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1558 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1559 * for PIO 5, which is a nonstandard extension and UDMA6, which
1560 * is currently supported only by Maxtor drives.
/* Columns: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma.
 * All values in nanoseconds; 0 means "not applicable for this mode".
 * UDMA modes only specify the udma cycle time. */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

/*	{ XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
/* ENOUGH(): ceil(v / unit) for v >= 1 — round a time up to whole clocks. */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
/* EZ(): like ENOUGH() but maps 0 ("unspecified") to 0 instead of rounding. */
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
/* Quantize nanosecond timings in *t into clock counts in *q, rounding up.
 * T is the command-cycle clock period, UT the UDMA clock period
 * (the *1000 scaling suggests T/UT are in picoseconds — confirm at callers). */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	/* UDMA timing is quantized against its own clock */
	q->udma = EZ(t->udma * 1000, UT);
/* Merge timings a and b into m, taking the element-wise maximum (i.e. the
 * slower, safer value) for each field selected by the @what bitmask. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
/* Linear-search ata_timing[] for @speed; the table is terminated by a
 * sentinel entry whose mode is 0xFF. */
static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
/* Compute quantized bus timings for @adev at transfer mode @speed into *t,
 * honoring EIDE drive-reported minimum cycle times and the PIO floor. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
	const struct ata_timing *s;
	struct ata_timing p;

	/* unknown/unsupported mode -> error (lookup fails) */
	if (!(s = ata_timing_find_mode(speed)))

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* PIO0-2 have no IORDY; use the non-IORDY cycle word */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					  else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		/* never go faster than the drive's reported minimums */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
/* Program @dev's chosen xfer mode into the device (SET FEATURES - XFER)
 * and revalidate it; disables the whole port on revalidation failure. */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	/* the device may change identity after a mode switch; re-check it */
	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
/* Program host-side PIO timings for every present device on @ap.
 * A device without any usable PIO mode is reported as a hard error. */
static int ata_host_set_pio(struct ata_port *ap)
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		/* optional hook: not all controllers need host-side setup */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
/* Program host-side DMA timings for every present device that has a
 * DMA mode selected; devices without one are silently skipped. */
static void ata_host_set_dma(struct ata_port *ap)
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev) || !dev->dma_mode)

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		/* optional hook: not all controllers need host-side setup */
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
1766 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1767 * @ap: port on which timings will be programmed
1769 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1772 * PCI/etc. bus probe sem.
static void ata_set_mode(struct ata_port *ap)
	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int xfer_mask;

		if (!ata_dev_present(dev))

		/* mask combines host, device and blacklist limits */
		xfer_mask = ata_dev_xfermask(ap, dev);

		dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
		dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)

	/* optional controller hook run after all modes are programmed */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	ata_port_disable(ap);
1818 * ata_tf_to_host - issue ATA taskfile to host controller
1819 * @ap: port to which command is being issued
1820 * @tf: ATA taskfile register set
1822 * Issues ATA taskfile register set to ATA host controller,
1823 * with proper synchronization with interrupt handler and
1827 * spin_lock_irqsave(host_set lock)
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
	/* load the taskfile registers first, then write the command
	 * register — writing the command is what starts execution */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
1838 * ata_busy_sleep - sleep until BSY clears, or timeout
1839 * @ap: port containing status register to be polled
1840 * @tmout_pat: impatience timeout
1841 * @tmout: overall timeout
1843 * Sleep until ATA Status register bit BSY clears,
1844 * or a timeout occurs.
unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
	unsigned long timer_start, timeout;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	/* phase 1: poll until the "impatience" timeout expires */
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		status = ata_busy_wait(ap, ATA_BUSY, 3);

	/* still busy: warn once, then keep polling up to the hard timeout */
	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		status = ata_chk_status(ap);

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
/* Wait for the devices found by ata_devchk() to come back after a bus
 * reset: BSY must clear, and device 1 must respond to register reads. */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		/* nsect==1 && lbal==1 is the post-reset signature: the
		 * device's registers are accessible */
		if ((nsect == 1) && (lbal == 1))
		if (time_after(jiffies, timeout)) {
		msleep(50);	/* give drive a breather */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	ap->ops->dev_select(ap, 1);
	ap->ops->dev_select(ap, 0);
1930 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1931 * @ap: Port to reset and probe
1933 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1934 * probe the bus. Not often used these days.
1937 * PCI/etc. bus probe sem.
1938 * Obtains host_set lock.
static unsigned int ata_bus_edd(struct ata_port *ap)
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* issue under host_set lock to serialize against the irq handler */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms. but who knows with those
	 * crazy ATAPI devices...
	 */

	/* returns 0 if BSY cleared in time, non-zero on timeout */
	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* Perform an ATA software reset (SRST pulse in the device control
 * register) and wait for the devices in @devmask to recover.
 * Returns non-zero on failure. */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset. causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked. Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */

	/* Before we perform post reset processing we want to see if
	   the bus shows 0xFF because the odd clown forgets the D7 pulldown
	 */
	if (ata_check_status(ap) == 0xFF)
		return 1;	/* Positive is failure for some reason */

	ata_bus_post_reset(ap, devmask);
2016 * ata_bus_reset - reset host port and associated ATA channel
2017 * @ap: port to reset
2019 * This is typically the first time we actually start issuing
2020 * commands to the ATA channel. We wait for BSY to clear, then
2021 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2022 * result. Determine what devices, if any, are on the channel
2023 * by looking at the device 0/1 error register. Look at the signature
2024 * stored in each device's taskfile registers, to determine if
2025 * the device is ATA or ATAPI.
2028 * PCI/etc. bus probe sem.
2029 * Obtains host_set lock.
2032 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
void ata_bus_reset(struct ata_port *ap)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = ata_devchk(ap, 0);
		dev1 = ata_devchk(ap, 1);

		devmask |= (1 << 0);
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
			outb(ap->ctl, ioaddr->ctl_addr);
		/* fall back to EXECUTE DEVICE DIAGNOSTIC reset */
		rc = ata_bus_edd(ap);

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err 0x81 means "device 0 ok, device 1 failed diagnostics" */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
			outb(ap->ctl, ioaddr->ctl_addr);

	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);
/* Clear DET in SControl (0x300 = no reset request) to bring the phy
 * out of reset, then poll SStatus up to 5 s for the link to come up. */
static int sata_phy_resume(struct ata_port *ap)
	unsigned long timeout = jiffies + (HZ * 5);

	scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* Wait for phy to become ready, if necessary. */
		sstatus = scr_read(ap, SCR_STATUS);
		/* DET != 1 means the phy left the "presence, no comm" state */
		if ((sstatus & 0xf) != 1)
	} while (time_before(jiffies, timeout));
2135 * ata_std_probeinit - initialize probing
2136 * @ap: port to be probed
2138 * @ap is about to be probed. Initialize it. This function is
2139 * to be used as standard callback for ata_drive_probe_reset().
2141 * NOTE!!! Do not use this function as probeinit if a low level
2142 * driver implements only hardreset. Just pass NULL as probeinit
2143 * in that case. Using this function is probably okay but doing
2144 * so makes reset sequence different from the original
2145 * ->phy_reset implementation and Jeff nervous. :-P
2147 extern void ata_std_probeinit(struct ata_port *ap)
2149 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2150 sata_phy_resume(ap);
2151 if (sata_dev_present(ap))
2152 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2157 * ata_std_softreset - reset host port via ATA SRST
2158 * @ap: port to reset
2159 * @verbose: fail verbosely
2160 * @classes: resulting classes of attached devices
2162 * Reset host port using ATA SRST. This function is to be used
2163 * as standard callback for ata_drive_*_reset() functions.
2166 * Kernel thread context (may sleep)
2169 * 0 on success, -errno otherwise.
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;

	/* SATA port with no device on the link: nothing to reset */
	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
		printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
		DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	/* err 0x81 = device 1 failed diagnostics; don't trust its signature */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2217 * sata_std_hardreset - reset host port via SATA phy reset
2218 * @ap: port to reset
2219 * @verbose: fail verbosely
2220 * @class: resulting class of attached device
2222 * SATA phy-reset host port using DET bits of SControl register.
2223 * This function is to be used as standard callback for
2224 * ata_drive_*_reset().
2227 * Kernel thread context (may sleep)
2230 * 0 on success, -errno otherwise.
int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
	/* Issue phy wake/reset: DET=1 (COMRESET) in SControl */
	scr_write_flush(ap, SCR_CONTROL, 0x301);

	/*
	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */

	/* Bring phy back */
	sata_phy_resume(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!sata_dev_present(ap)) {
		/* link offline is a successful reset with no device */
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		printk(KERN_ERR "ata%u: COMRESET failed "
		       "(device not ready)\n", ap->id);
		DPRINTK("EXIT, device not ready\n");

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
2273 * ata_std_postreset - standard postreset callback
2274 * @ap: the target ata_port
2275 * @classes: classes of attached devices
2277 * This function is invoked after a successful reset. Note that
2278 * the device might have been reset more than once using
2279 * different reset methods before postreset is invoked.
2281 * This function is to be used as standard callback for
2282 * ata_drive_*_reset().
2285 * Kernel thread context (may sleep)
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
	/* set cable type if it isn't already set */
	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* print link status */
	if (ap->cbl == ATA_CBL_SATA)
		sata_print_link_status(ap);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */

	/* is double-select really necessary? */
	/* NOTE(review): selecting dev 1 when classes[0] is present (and
	 * vice versa) looks inverted, but matches the historical code and
	 * ends with device 0 selected — confirm before "fixing". */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");

	/* set up device control */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
			outb(ap->ctl, ap->ioaddr.ctl_addr);
2327 * ata_std_probe_reset - standard probe reset method
 * @ap: port to perform probe-reset
2329 * @classes: resulting classes of attached devices
2331 * The stock off-the-shelf ->probe_reset method.
2334 * Kernel thread context (may sleep)
2337 * 0 on success, -errno otherwise.
int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
	ata_reset_fn_t hardreset;

	/* hardreset is only possible when SATA SCR registers are
	 * accessible (the NULL default is set on a line not visible
	 * in this chunk) */
	if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
		hardreset = sata_std_hardreset;

	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, hardreset,
				     ata_std_postreset, classes);
/* Run one reset method: reset, normalize classification results, then
 * invoke the postreset callback.  Returns 0 iff classification succeeded. */
static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
			  ata_postreset_fn_t postreset,
			  unsigned int *classes)
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	rc = reset(ap, 0, classes);

	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
	 * is complete and convert all ATA_DEV_UNKNOWN to
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] != ATA_DEV_UNKNOWN)

	if (i < ATA_MAX_DEVICES)
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			if (classes[i] == ATA_DEV_UNKNOWN)
				classes[i] = ATA_DEV_NONE;

	postreset(ap, classes);

	/* all-UNKNOWN means the reset classified nothing -> -ENODEV */
	return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2385 * ata_drive_probe_reset - Perform probe reset with given methods
2386 * @ap: port to reset
2387 * @probeinit: probeinit method (can be NULL)
2388 * @softreset: softreset method (can be NULL)
2389 * @hardreset: hardreset method (can be NULL)
2390 * @postreset: postreset method (can be NULL)
2391 * @classes: resulting classes of attached devices
2393 * Reset the specified port and classify attached devices using
2394 * given methods. This function prefers softreset but tries all
2395 * possible reset sequences to reset and classify devices. This
2396 * function is intended to be used for constructing ->probe_reset
2397 * callback by low level drivers.
2399 * Reset methods should follow the following rules.
 * - Return 0 on success, -errno on failure.
2402 * - If classification is supported, fill classes[] with
2403 * recognized class codes.
2404 * - If classification is not supported, leave classes[] alone.
2405 * - If verbose is non-zero, print error message on failure;
2406 * otherwise, shut up.
2409 * Kernel thread context (may sleep)
 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2413 * if classification fails, and any error code from reset
int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset, unsigned int *classes)
	/* prefer softreset; try it first when available */
	rc = do_probe_reset(ap, softreset, postreset, classes);

	/* escalate to hardreset; any error other than "couldn't
	 * classify" (-ENODEV) is final */
	rc = do_probe_reset(ap, hardreset, postreset, classes);
	if (rc == 0 || rc != -ENODEV)

	/* last resort: softreset again after the hardreset */
	rc = do_probe_reset(ap, softreset, postreset, classes);
2445 * ata_dev_same_device - Determine whether new ID matches configured device
2446 * @ap: port on which the device to compare against resides
2447 * @dev: device to compare against
2448 * @new_class: class of the new device
2449 * @new_id: IDENTIFY page of the new device
2451 * Compare @new_class and @new_id against @dev and determine
2452 * whether @dev is the device indicated by @new_class and
2459 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
			       unsigned int new_class, const u16 *new_id)
	const u16 *old_id = dev->id;
	/* [0] = old/configured values, [1] = freshly-read values */
	unsigned char model[2][41], serial[2][21];

	if (dev->class != new_class) {
		       "ata%u: dev %u class mismatch %d != %d\n",
		       ap->id, dev->devno, dev->class, new_class);

	/* compare model string, serial number and capacity */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		       "ata%u: dev %u model number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, model[0], model[1]);

	if (strcmp(serial[0], serial[1])) {
		       "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, serial[0], serial[1]);

	/* capacity check only makes sense for ATA (disk) devices */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		       "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
		       ap->id, dev->devno, (unsigned long long)dev->n_sectors,
		       (unsigned long long)new_n_sectors);
2507 * ata_dev_revalidate - Revalidate ATA device
2508 * @ap: port on which the device to revalidate resides
2509 * @dev: device to revalidate
2510 * @post_reset: is this revalidation after reset?
2512 * Re-read IDENTIFY page and make sure @dev is still attached to
2516 * Kernel thread context (may sleep)
2519 * 0 on success, negative errno otherwise
int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
	if (!ata_dev_present(dev))

	/* allocate & read ID data */
	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);

	/* is the device still there? */
	if (!ata_dev_same_device(ap, dev, class, id)) {

	/* configure device according to the new ID */
	return ata_dev_configure(ap, dev, 0);

	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
	       ap->id, dev->devno, rc);
/* Flat array of {model, revision} pairs of devices with broken DMA.
 * A NULL revision blacklists every firmware revision of that model;
 * a non-NULL revision blacklists only that specific firmware. */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B", NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
/* Trim trailing blanks from the (possibly non-terminated) string @s of
 * at most @len bytes; ATA identify strings are space-padded. */
static int ata_strim(char *s, size_t len)
	len = strnlen(s, len);

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
2603 static int ata_dma_blacklisted(const struct ata_device *dev)
2605 unsigned char model_num[40];
2606 unsigned char model_rev[16];
2607 unsigned int nlen, rlen;
2610 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2612 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2614 nlen = ata_strim(model_num, sizeof(model_num));
2615 rlen = ata_strim(model_rev, sizeof(model_rev));
2617 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2618 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2619 if (ata_dma_blacklist[i+1] == NULL)
2621 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2629 * ata_dev_xfermask - Compute supported xfermask of the given device
2630 * @ap: Port on which the device to compute xfermask for resides
2631 * @dev: Device to compute xfermask for
2633 * Compute supported xfermask of @dev. This function is
2634 * responsible for applying all known limits including host
2635 * controller limits, device blacklist, etc...
2641 * Computed xfermask.
static unsigned int ata_dev_xfermask(struct ata_port *ap,
				     struct ata_device *dev)
	unsigned long xfer_mask;

	/* start from what the host controller supports */
	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,

	/* use port-wide xfermask for now */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *d = &ap->device[i];
		if (!ata_dev_present(d))
		/* intersect with each device's capabilities... */
		xfer_mask &= ata_id_xfermask(d->id);
		/* ...and strip DMA modes for blacklisted devices */
		if (ata_dma_blacklisted(d))
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);

	if (ata_dma_blacklisted(dev))
		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
		       "disabling DMA\n", ap->id, dev->devno);
2670 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2671 * @ap: Port associated with device @dev
2672 * @dev: Device to which command will be sent
2674 * Issue SET FEATURES - XFER MODE command to device @dev
2678 * PCI/etc. bus probe sem.
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
	struct ata_taskfile tf;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* sector count register carries the desired transfer mode code */
	tf.nsect = dev->xfer_mode;

	/* a failed SET FEATURES leaves the device unusable: disable port */
	if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
		printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
		ata_port_disable(ap);
2705 * ata_dev_init_params - Issue INIT DEV PARAMS command
2706 * @ap: Port associated with device @dev
2707 * @dev: Device to which command will be sent
2710 * Kernel thread context (may sleep)
2713 * 0 on success, AC_ERR_* mask otherwise.
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev)
	struct ata_taskfile tf;
	unsigned int err_mask;
	/* CHS geometry from IDENTIFY words 6 (sectors/track) and 3 (heads) */
	u16 sectors = dev->id[6];
	u16 heads = dev->id[3];

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.device |= (heads - 1) & 0x0f;	/* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2745 * ata_sg_clean - Unmap DMA memory associated with command
2746 * @qc: Command containing DMA memory to be released
2748 * Unmap all mapped DMA memory associated with this command.
2751 * spin_lock_irqsave(host_set lock)
static void ata_sg_clean(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	/* this must only run on commands that were actually DMA-mapped */
	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
			struct scatterlist *psg = &qc->pad_sgent;
			/* kmap_atomic: the pad target page may be highmem */
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);

		/* single-buffer case: one mapping, direct memcpy back */
		dma_unmap_single(ap->host_set->dev,
				 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
		sg->length += qc->pad_len;
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
2804 * ata_fill_sg - Fill PCI IDE PRD table
2805 * @qc: Metadata associated with taskfile to be transferred
2807 * Fill PCI IDE PRD (scatter-gather) table with segments
2808 * associated with the current disk command.
2811 * spin_lock_irqsave(host_set lock)
2814 static void ata_fill_sg(struct ata_queued_cmd *qc)
2816 struct ata_port *ap = qc->ap;
2817 struct scatterlist *sg;
2820 WARN_ON(qc->__sg == NULL);
2821 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2824 ata_for_each_sg(sg, qc) {
2828 /* determine if physical DMA addr spans 64K boundary.
2829 * Note h/w doesn't support 64-bit, so we unconditionally
2830 * truncate dma_addr_t to u32.
2832 addr = (u32) sg_dma_address(sg);
2833 sg_len = sg_dma_len(sg);
2836 offset = addr & 0xffff;
/* each PRD entry may describe at most 64KB and must not cross 64K */
2838 if ((offset + sg_len) > 0x10000)
2839 len = 0x10000 - offset;
2841 ap->prd[idx].addr = cpu_to_le32(addr);
2842 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2843 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
/* flag the final PRD entry as end-of-table for the controller */
2852 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2855 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2856 * @qc: Metadata associated with taskfile to check
2858 * Allow low-level driver to filter ATA PACKET commands, returning
2859 * a status indicating whether or not it is OK to use DMA for the
2860 * supplied PACKET command.
2863 * spin_lock_irqsave(host_set lock)
2865 * RETURNS: 0 when ATAPI DMA can be used
2868 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2870 struct ata_port *ap = qc->ap;
2871 int rc = 0; /* Assume ATAPI DMA is OK by default */
/* give the low-level driver a chance to veto DMA for this command */
2873 if (ap->ops->check_atapi_dma)
2874 rc = ap->ops->check_atapi_dma(qc);
2879 * ata_qc_prep - Prepare taskfile for submission
2880 * @qc: Metadata associated with taskfile to be prepared
2882 * Prepare ATA taskfile for submission.
2885 * spin_lock_irqsave(host_set lock)
2887 void ata_qc_prep(struct ata_queued_cmd *qc)
/* PRD filling is only needed when the command was DMA-mapped */
2889 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/* No-op qc_prep hook for controllers that need no per-command setup. */
2895 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2898 * ata_sg_init_one - Associate command with memory buffer
2899 * @qc: Command to be associated
2900 * @buf: Memory buffer
2901 * @buflen: Length of memory buffer, in bytes.
2903 * Initialize the data-related elements of queued_cmd @qc
2904 * to point to a single memory buffer, @buf of byte length @buflen.
2907 * spin_lock_irqsave(host_set lock)
2910 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2912 struct scatterlist *sg;
2914 qc->flags |= ATA_QCFLAG_SINGLE;
/* the qc's embedded sgent describes the whole single buffer */
2916 memset(&qc->sgent, 0, sizeof(qc->sgent));
2917 qc->__sg = &qc->sgent;
2919 qc->orig_n_elem = 1;
2923 sg_init_one(sg, buf, buflen);
2927 * ata_sg_init - Associate command with scatter-gather table.
2928 * @qc: Command to be associated
2929 * @sg: Scatter-gather table.
2930 * @n_elem: Number of elements in s/g table.
2932 * Initialize the data-related elements of queued_cmd @qc
2933 * to point to a scatter-gather table @sg, containing @n_elem
2937 * spin_lock_irqsave(host_set lock)
2940 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2941 unsigned int n_elem)
2943 qc->flags |= ATA_QCFLAG_SG;
/* record the caller-owned table size; orig_n_elem preserved for unmap */
2945 qc->n_elem = n_elem;
2946 qc->orig_n_elem = n_elem;
2950 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2951 * @qc: Command with memory buffer to be mapped.
2953 * DMA-map the memory buffer associated with queued_cmd @qc.
2956 * spin_lock_irqsave(host_set lock)
2959 * Zero on success, negative on error.
2962 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2964 struct ata_port *ap = qc->ap;
2965 int dir = qc->dma_dir;
2966 struct scatterlist *sg = qc->__sg;
2967 dma_addr_t dma_address;
2970 /* we must lengthen transfers to end on a 32-bit boundary */
2971 qc->pad_len = sg->length & 3;
2973 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2974 struct scatterlist *psg = &qc->pad_sgent;
/* only ATAPI transfers can be misaligned enough to need padding */
2976 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2978 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
/* writes: pre-load the pad buffer with the tail bytes of the payload */
2980 if (qc->tf.flags & ATA_TFLAG_WRITE)
2981 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2984 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2985 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2987 sg->length -= qc->pad_len;
2988 if (sg->length == 0)
2991 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2992 sg->length, qc->pad_len);
3000 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3002 if (dma_mapping_error(dma_address)) {
/* undo the padding length adjustment before reporting failure */
3004 sg->length += qc->pad_len;
3008 sg_dma_address(sg) = dma_address;
3009 sg_dma_len(sg) = sg->length;
3012 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3013 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3019 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3020 * @qc: Command with scatter-gather table to be mapped.
3022 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3025 * spin_lock_irqsave(host_set lock)
3028 * Zero on success, negative on error.
3032 static int ata_sg_setup(struct ata_queued_cmd *qc)
3034 struct ata_port *ap = qc->ap;
3035 struct scatterlist *sg = qc->__sg;
3036 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3037 int n_elem, pre_n_elem, dir, trim_sg = 0;
3039 VPRINTK("ENTER, ata%u\n", ap->id);
3040 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3042 /* we must lengthen transfers to end on a 32-bit boundary */
3043 qc->pad_len = lsg->length & 3;
3045 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3046 struct scatterlist *psg = &qc->pad_sgent;
3047 unsigned int offset;
3049 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3051 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3054 * psg->page/offset are used to copy to-be-written
3055 * data in this function or read data in ata_sg_clean.
3057 offset = lsg->offset + lsg->length - qc->pad_len;
/* tail bytes may start past the sg's first page; walk to the right one */
3058 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3059 psg->offset = offset_in_page(offset);
3061 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3062 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3063 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3064 kunmap_atomic(addr, KM_IRQ0);
3067 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3068 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3070 lsg->length -= qc->pad_len;
/* NOTE(review): a zero-length tail sg is presumably dropped via trim_sg
 * -- the branch body is not visible in this extract, confirm upstream */
3071 if (lsg->length == 0)
3074 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3075 qc->n_elem - 1, lsg->length, qc->pad_len);
3078 pre_n_elem = qc->n_elem;
3079 if (trim_sg && pre_n_elem)
3088 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3090 /* restore last sg */
3091 lsg->length += qc->pad_len;
3095 DPRINTK("%d sg elements mapped\n", n_elem);
3098 qc->n_elem = n_elem;
3104 * ata_poll_qc_complete - turn irq back on and finish qc
3105 * @qc: Command to complete
3106 * @err_mask: ATA status register content
3109 * None. (grabs host lock)
3112 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3114 struct ata_port *ap = qc->ap;
3115 unsigned long flags;
3117 spin_lock_irqsave(&ap->host_set->lock, flags);
/* re-enable interrupt delivery for the port before completing */
3118 ap->flags &= ~ATA_FLAG_NOINTR;
3120 ata_qc_complete(qc);
3121 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3125 * ata_pio_poll - poll using PIO, depending on current state
3126 * @ap: the target ata_port
3129 * None. (executing in kernel thread context)
3132 * timeout value to use
3135 static unsigned long ata_pio_poll(struct ata_port *ap)
3137 struct ata_queued_cmd *qc;
3139 unsigned int poll_state = HSM_ST_UNKNOWN;
3140 unsigned int reg_state = HSM_ST_UNKNOWN;
3142 qc = ata_qc_from_tag(ap, ap->active_tag);
3143 WARN_ON(qc == NULL);
/* map the current HSM state to its polling/regular state pair */
3145 switch (ap->hsm_task_state) {
3148 poll_state = HSM_ST_POLL;
3152 case HSM_ST_LAST_POLL:
3153 poll_state = HSM_ST_LAST_POLL;
3154 reg_state = HSM_ST_LAST;
3161 status = ata_chk_status(ap);
/* BSY still set: either the deadline expired or we keep polling */
3162 if (status & ATA_BUSY) {
3163 if (time_after(jiffies, ap->pio_task_timeout)) {
3164 qc->err_mask |= AC_ERR_TIMEOUT;
3165 ap->hsm_task_state = HSM_ST_TMOUT;
3168 ap->hsm_task_state = poll_state;
3169 return ATA_SHORT_PAUSE;
3172 ap->hsm_task_state = reg_state;
3177 * ata_pio_complete - check if drive is busy or idle
3178 * @ap: the target ata_port
3181 * None. (executing in kernel thread context)
3184 * Non-zero if qc completed, zero otherwise.
3187 static int ata_pio_complete (struct ata_port *ap)
3189 struct ata_queued_cmd *qc;
3193 * This is purely heuristic. This is a fast path. Sometimes when
3194 * we enter, BSY will be cleared in a chk-status or two. If not,
3195 * the drive is probably seeking or something. Snooze for a couple
3196 * msecs, then chk-status again. If still busy, fall back to
3197 * HSM_ST_POLL state.
3199 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3200 if (drv_stat & ATA_BUSY) {
3202 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3203 if (drv_stat & ATA_BUSY) {
/* still busy after two quick checks: switch to timed polling */
3204 ap->hsm_task_state = HSM_ST_LAST_POLL;
3205 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3210 qc = ata_qc_from_tag(ap, ap->active_tag);
3211 WARN_ON(qc == NULL);
/* drive is idle; examine final status and complete the command */
3213 drv_stat = ata_wait_idle(ap);
3214 if (!ata_ok(drv_stat)) {
3215 qc->err_mask |= __ac_err_mask(drv_stat);
3216 ap->hsm_task_state = HSM_ST_ERR;
3220 ap->hsm_task_state = HSM_ST_IDLE;
3222 WARN_ON(qc->err_mask);
3223 ata_poll_qc_complete(qc);
3225 /* another command may start at this point */
3232 * swap_buf_le16 - swap halves of 16-bit words in place
3233 * @buf: Buffer to swap
3234 * @buf_words: Number of 16-bit words in buffer.
3236 * Swap halves of 16-bit words if needed to convert from
3237 * little-endian byte order to native cpu byte order, or
3241 * Inherited from caller.
3243 void swap_buf_le16(u16 *buf, unsigned int buf_words)
/* the loop is compiled only for __BIG_ENDIAN; no-op on little-endian */
3248 for (i = 0; i < buf_words; i++)
3249 buf[i] = le16_to_cpu(buf[i]);
3250 #endif /* __BIG_ENDIAN */
3254 * ata_mmio_data_xfer - Transfer data by MMIO
3255 * @ap: port to read/write
3257 * @buflen: buffer length
3258 * @write_data: read/write
3260 * Transfer data from/to the device data register by MMIO.
3263 * Inherited from caller.
3266 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3267 unsigned int buflen, int write_data)
3270 unsigned int words = buflen >> 1;
3271 u16 *buf16 = (u16 *) buf;
3272 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3274 /* Transfer multiple of 2 bytes */
3276 for (i = 0; i < words; i++)
3277 writew(le16_to_cpu(buf16[i]), mmio);
3279 for (i = 0; i < words; i++)
3280 buf16[i] = cpu_to_le16(readw(mmio));
3283 /* Transfer trailing 1 byte, if any. */
3284 if (unlikely(buflen & 0x01)) {
/* bounce the odd byte through a zeroed 16-bit temp to keep I/O word-wide */
3285 u16 align_buf[1] = { 0 };
3286 unsigned char *trailing_buf = buf + buflen - 1;
3289 memcpy(align_buf, trailing_buf, 1);
3290 writew(le16_to_cpu(align_buf[0]), mmio);
3292 align_buf[0] = cpu_to_le16(readw(mmio));
3293 memcpy(trailing_buf, align_buf, 1);
3299 * ata_pio_data_xfer - Transfer data by PIO
3300 * @ap: port to read/write
3302 * @buflen: buffer length
3303 * @write_data: read/write
3305 * Transfer data from/to the device data register by PIO.
3308 * Inherited from caller.
3311 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3312 unsigned int buflen, int write_data)
3314 unsigned int words = buflen >> 1;
3316 /* Transfer multiple of 2 bytes */
3318 outsw(ap->ioaddr.data_addr, buf, words);
3320 insw(ap->ioaddr.data_addr, buf, words);
3322 /* Transfer trailing 1 byte, if any. */
3323 if (unlikely(buflen & 0x01)) {
/* same odd-byte bounce as the MMIO variant, via port I/O */
3324 u16 align_buf[1] = { 0 };
3325 unsigned char *trailing_buf = buf + buflen - 1;
3328 memcpy(align_buf, trailing_buf, 1);
3329 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3331 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3332 memcpy(trailing_buf, align_buf, 1);
3338 * ata_data_xfer - Transfer data from/to the data register.
3339 * @ap: port to read/write
3341 * @buflen: buffer length
3342 * @do_write: read/write
3344 * Transfer data from/to the device data register.
3347 * Inherited from caller.
3350 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3351 unsigned int buflen, int do_write)
3353 /* Make the crap hardware pay the costs not the good stuff */
/* IRQ-mask-quirked controllers do the transfer with local IRQs off */
3354 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3355 unsigned long flags;
3356 local_irq_save(flags);
3357 if (ap->flags & ATA_FLAG_MMIO)
3358 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3360 ata_pio_data_xfer(ap, buf, buflen, do_write);
3361 local_irq_restore(flags);
/* normal path: dispatch to the MMIO or port-I/O transfer routine */
3363 if (ap->flags & ATA_FLAG_MMIO)
3364 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3366 ata_pio_data_xfer(ap, buf, buflen, do_write);
3371 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3372 * @qc: Command on going
3374 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3377 * Inherited from caller.
3380 static void ata_pio_sector(struct ata_queued_cmd *qc)
3382 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3383 struct scatterlist *sg = qc->__sg;
3384 struct ata_port *ap = qc->ap;
3386 unsigned int offset;
/* last sector of the request moves the HSM into its final state */
3389 if (qc->cursect == (qc->nsect - 1))
3390 ap->hsm_task_state = HSM_ST_LAST;
3392 page = sg[qc->cursg].page;
3393 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3395 /* get the current page and offset */
3396 page = nth_page(page, (offset >> PAGE_SHIFT));
3397 offset %= PAGE_SIZE;
3399 buf = kmap(page) + offset;
/* advance to the next sg entry when the current one is exhausted */
3404 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3409 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3411 /* do the actual data transfer */
3412 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3413 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3419 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3420 * @qc: Command on going
3421 * @bytes: number of bytes
3423 * Transfer Transfer data from/to the ATAPI device.
3426 * Inherited from caller.
3430 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3432 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3433 struct scatterlist *sg = qc->__sg;
3434 struct ata_port *ap = qc->ap;
3437 unsigned int offset, count;
/* this chunk finishes the request: move HSM to its final state */
3439 if (qc->curbytes + bytes >= qc->nbytes)
3440 ap->hsm_task_state = HSM_ST_LAST;
3443 if (unlikely(qc->cursg >= qc->n_elem)) {
3445 * The end of qc->sg is reached and the device expects
3446 * more data to transfer. In order not to overrun qc->sg
3447 * and fulfill length specified in the byte count register,
3448 * - for read case, discard trailing data from the device
3449 * - for write case, padding zero data to the device
3451 u16 pad_buf[1] = { 0 };
3452 unsigned int words = bytes >> 1;
3455 if (words) /* warning if bytes > 1 */
3456 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
/* drain/pad one word at a time through the zeroed bounce word */
3459 for (i = 0; i < words; i++)
3460 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3462 ap->hsm_task_state = HSM_ST_LAST;
3466 sg = &qc->__sg[qc->cursg];
3469 offset = sg->offset + qc->cursg_ofs;
3471 /* get the current page and offset */
3472 page = nth_page(page, (offset >> PAGE_SHIFT));
3473 offset %= PAGE_SIZE;
3475 /* don't overrun current sg */
3476 count = min(sg->length - qc->cursg_ofs, bytes);
3478 /* don't cross page boundaries */
3479 count = min(count, (unsigned int)PAGE_SIZE - offset);
3481 buf = kmap(page) + offset;
3484 qc->curbytes += count;
3485 qc->cursg_ofs += count;
/* current sg entry consumed: step to the next one */
3487 if (qc->cursg_ofs == sg->length) {
3492 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3494 /* do the actual data transfer */
3495 ata_data_xfer(ap, buf, count, do_write);
3504 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3505 * @qc: Command on going
3507 * Transfer Transfer data from/to the ATAPI device.
3510 * Inherited from caller.
3513 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3515 struct ata_port *ap = qc->ap;
3516 struct ata_device *dev = qc->dev;
3517 unsigned int ireason, bc_lo, bc_hi, bytes;
3518 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
/* re-read the taskfile: nsect carries IREASON, lbam/lbah the byte count */
3520 ap->ops->tf_read(ap, &qc->tf);
3521 ireason = qc->tf.nsect;
3522 bc_lo = qc->tf.lbam;
3523 bc_hi = qc->tf.lbah;
3524 bytes = (bc_hi << 8) | bc_lo;
3526 /* shall be cleared to zero, indicating xfer of data */
3527 if (ireason & (1 << 0))
3530 /* make sure transfer direction matches expected */
3531 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3532 if (do_write != i_write)
3535 __atapi_pio_bytes(qc, bytes);
/* protocol mismatch: fail the command with an HSM violation */
3540 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3541 ap->id, dev->devno);
3542 qc->err_mask |= AC_ERR_HSM;
3543 ap->hsm_task_state = HSM_ST_ERR;
3547 * ata_pio_block - start PIO on a block
3548 * @ap: the target ata_port
3551 * None. (executing in kernel thread context)
3554 static void ata_pio_block(struct ata_port *ap)
3556 struct ata_queued_cmd *qc;
3560 * This is purely heuristic. This is a fast path.
3561 * Sometimes when we enter, BSY will be cleared in
3562 * a chk-status or two. If not, the drive is probably seeking
3563 * or something. Snooze for a couple msecs, then
3564 * chk-status again. If still busy, fall back to
3565 * HSM_ST_POLL state.
3567 status = ata_busy_wait(ap, ATA_BUSY, 5);
3568 if (status & ATA_BUSY) {
3570 status = ata_busy_wait(ap, ATA_BUSY, 10);
3571 if (status & ATA_BUSY) {
3572 ap->hsm_task_state = HSM_ST_POLL;
3573 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3578 qc = ata_qc_from_tag(ap, ap->active_tag);
3579 WARN_ON(qc == NULL);
/* device-reported errors are handled before any data is moved */
3582 if (status & (ATA_ERR | ATA_DF)) {
3583 qc->err_mask |= AC_ERR_DEV;
3584 ap->hsm_task_state = HSM_ST_ERR;
3588 /* transfer data if any */
3589 if (is_atapi_taskfile(&qc->tf)) {
3590 /* DRQ=0 means no more data to transfer */
3591 if ((status & ATA_DRQ) == 0) {
3592 ap->hsm_task_state = HSM_ST_LAST;
3596 atapi_pio_bytes(qc);
3598 /* handle BSY=0, DRQ=0 as error */
3599 if ((status & ATA_DRQ) == 0) {
3600 qc->err_mask |= AC_ERR_HSM;
3601 ap->hsm_task_state = HSM_ST_ERR;
/* Complete the active qc after a PIO error; err_mask must already be set. */
3609 static void ata_pio_error(struct ata_port *ap)
3611 struct ata_queued_cmd *qc;
3613 qc = ata_qc_from_tag(ap, ap->active_tag);
3614 WARN_ON(qc == NULL);
/* ATAPI PACKET failures are routine (request sense follows); stay quiet */
3616 if (qc->tf.command != ATA_CMD_PACKET)
3617 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3619 /* make sure qc->err_mask is available to
3620 * know what's wrong and recover
3622 WARN_ON(qc->err_mask == 0);
3624 ap->hsm_task_state = HSM_ST_IDLE;
3626 ata_poll_qc_complete(qc);
/* Workqueue entry for the PIO state machine: dispatch on hsm_task_state,
 * then either re-queue itself (more polling needed) or stop. */
3629 static void ata_pio_task(void *_data)
3631 struct ata_port *ap = _data;
3632 unsigned long timeout;
3639 switch (ap->hsm_task_state) {
3648 qc_completed = ata_pio_complete(ap);
3652 case HSM_ST_LAST_POLL:
3653 timeout = ata_pio_poll(ap);
/* non-zero timeout: re-queue ourselves to poll again after the delay */
3663 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3664 else if (!qc_completed)
3669 * atapi_packet_task - Write CDB bytes to hardware
3670 * @_data: Port to which ATAPI device is attached.
3672 * When device has indicated its readiness to accept
3673 * a CDB, this function is called. Send the CDB.
3674 * If DMA is to be performed, exit immediately.
3675 * Otherwise, we are in polling mode, so poll
3676 * status under operation succeeds or fails.
3679 * Kernel thread context (may sleep)
3682 static void atapi_packet_task(void *_data)
3684 struct ata_port *ap = _data;
3685 struct ata_queued_cmd *qc;
3688 qc = ata_qc_from_tag(ap, ap->active_tag);
3689 WARN_ON(qc == NULL);
3690 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3692 /* sleep-wait for BSY to clear */
3693 DPRINTK("busy wait\n");
3694 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3695 qc->err_mask |= AC_ERR_TIMEOUT;
3699 /* make sure DRQ is set */
3700 status = ata_chk_status(ap);
3701 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3702 qc->err_mask |= AC_ERR_HSM;
3707 DPRINTK("send cdb\n");
3708 WARN_ON(qc->dev->cdb_len < 12);
3710 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3711 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3712 unsigned long flags;
3714 /* Once we're done issuing command and kicking bmdma,
3715 * irq handler takes over. To not lose irq, we need
3716 * to clear NOINTR flag before sending cdb, but
3717 * interrupt handler shouldn't be invoked before we're
3718 * finished. Hence, the following locking.
3720 spin_lock_irqsave(&ap->host_set->lock, flags);
3721 ap->flags &= ~ATA_FLAG_NOINTR;
3722 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3723 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3724 ap->ops->bmdma_start(qc); /* initiate bmdma */
3725 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* plain ATAPI PIO: send the CDB, then drive the transfer by polling */
3727 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3729 /* PIO commands are handled by polling */
3730 ap->hsm_task_state = HSM_ST;
3731 ata_port_queue_task(ap, ata_pio_task, ap, 0);
/* error exit: complete the qc with whatever err_mask was set above */
3737 ata_poll_qc_complete(qc);
3741 * ata_qc_timeout - Handle timeout of queued command
3742 * @qc: Command that timed out
3744 * Some part of the kernel (currently, only the SCSI layer)
3745 * has noticed that the active command on port @ap has not
3746 * completed after a specified length of time. Handle this
3747 * condition by disabling DMA (if necessary) and completing
3748 * transactions, with error if necessary.
3750 * This also handles the case of the "lost interrupt", where
3751 * for some reason (possibly hardware bug, possibly driver bug)
3752 * an interrupt was not delivered to the driver, even though the
3753 * transaction completed successfully.
3756 * Inherited from SCSI layer (none, can sleep)
3759 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3761 struct ata_port *ap = qc->ap;
3762 struct ata_host_set *host_set = ap->host_set;
3763 u8 host_stat = 0, drv_stat;
3764 unsigned long flags;
/* cancel any pending PIO task work for this port */
3768 ap->hsm_task_state = HSM_ST_IDLE;
3770 spin_lock_irqsave(&host_set->lock, flags);
3772 switch (qc->tf.protocol) {
3775 case ATA_PROT_ATAPI_DMA:
3776 host_stat = ap->ops->bmdma_status(ap);
3778 /* before we do anything else, clear DMA-Start bit */
3779 ap->ops->bmdma_stop(qc);
3785 drv_stat = ata_chk_status(ap);
3787 /* ack bmdma irq events */
3788 ap->ops->irq_clear(ap);
3790 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3791 ap->id, qc->tf.command, drv_stat, host_stat);
3793 /* complete taskfile transaction */
3794 qc->err_mask |= ac_err_mask(drv_stat);
/* EH completion runs outside the host_set lock (it can sleep) */
3798 spin_unlock_irqrestore(&host_set->lock, flags);
3800 ata_eh_qc_complete(qc);
3806 * ata_eng_timeout - Handle timeout of queued command
3807 * @ap: Port on which timed-out command is active
3809 * Some part of the kernel (currently, only the SCSI layer)
3810 * has noticed that the active command on port @ap has not
3811 * completed after a specified length of time. Handle this
3812 * condition by disabling DMA (if necessary) and completing
3813 * transactions, with error if necessary.
3815 * This also handles the case of the "lost interrupt", where
3816 * for some reason (possibly hardware bug, possibly driver bug)
3817 * an interrupt was not delivered to the driver, even though the
3818 * transaction completed successfully.
3821 * Inherited from SCSI layer (none, can sleep)
3824 void ata_eng_timeout(struct ata_port *ap)
/* delegate to the per-qc timeout handler for the active command */
3828 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3834 * ata_qc_new - Request an available ATA command, for queueing
3835 * @ap: Port associated with device @dev
3836 * @dev: Device from whom we request an available command structure
3842 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3844 struct ata_queued_cmd *qc = NULL;
/* claim the first free tag atomically via test_and_set_bit */
3847 for (i = 0; i < ATA_MAX_QUEUE; i++)
3848 if (!test_and_set_bit(i, &ap->qactive)) {
3849 qc = ata_qc_from_tag(ap, i);
3860 * ata_qc_new_init - Request an available ATA command, and initialize it
3861 * @ap: Port associated with device @dev
3862 * @dev: Device from whom we request an available command structure
3868 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3869 struct ata_device *dev)
3871 struct ata_queued_cmd *qc;
/* allocate a tagged qc first; initialization follows on success */
3873 qc = ata_qc_new(ap);
3886 * ata_qc_free - free unused ata_queued_cmd
3887 * @qc: Command to complete
3889 * Designed to free unused ata_queued_cmd object
3890 * in case something prevents using it.
3893 * spin_lock_irqsave(host_set lock)
3895 void ata_qc_free(struct ata_queued_cmd *qc)
3897 struct ata_port *ap = qc->ap;
3900 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3904 if (likely(ata_tag_valid(tag))) {
3905 if (tag == ap->active_tag)
3906 ap->active_tag = ATA_TAG_POISON;
/* poison the tag so stale references are caught, then release it */
3907 qc->tag = ATA_TAG_POISON;
3908 clear_bit(tag, &ap->qactive);
/* Low-level qc completion: unmap DMA if needed, clear ACTIVE, invoke the
 * registered completion callback. */
3912 void __ata_qc_complete(struct ata_queued_cmd *qc)
3914 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3915 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3917 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3920 /* atapi: mark qc as inactive to prevent the interrupt handler
3921 * from completing the command twice later, before the error handler
3922 * is called. (when rc != 0 and atapi request sense is needed)
3924 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3926 /* call completion callback */
3927 qc->complete_fn(qc);
/* Decide whether this qc's protocol requires a DMA mapping before issue. */
3930 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3932 struct ata_port *ap = qc->ap;
3934 switch (qc->tf.protocol) {
3936 case ATA_PROT_ATAPI_DMA:
3939 case ATA_PROT_ATAPI:
/* ATAPI PIO can still use DMA when the port advertises PIO_DMA */
3941 if (ap->flags & ATA_FLAG_PIO_DMA)
3954 * ata_qc_issue - issue taskfile to device
3955 * @qc: command to issue to device
3957 * Prepare an ATA command to submission to device.
3958 * This includes mapping the data into a DMA-able
3959 * area, filling in the S/G table, and finally
3960 * writing the taskfile to hardware, starting the command.
3963 * spin_lock_irqsave(host_set lock)
3966 * Zero on success, AC_ERR_* mask on failure
3969 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3971 struct ata_port *ap = qc->ap;
3973 if (ata_should_dma_map(qc)) {
/* NOTE(review): setup failures appear to bail to the AC_ERR_SYSTEM
 * path at the bottom -- the branch bodies are not visible here */
3974 if (qc->flags & ATA_QCFLAG_SG) {
3975 if (ata_sg_setup(qc))
3977 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3978 if (ata_sg_setup_one(qc))
3982 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3985 ap->ops->qc_prep(qc);
3987 qc->ap->active_tag = qc->tag;
3988 qc->flags |= ATA_QCFLAG_ACTIVE;
3990 return ap->ops->qc_issue(qc);
/* error path: drop the DMA-mapped flag and report a system error */
3993 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3994 return AC_ERR_SYSTEM;
3999 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4000 * @qc: command to issue to device
4002 * Using various libata functions and hooks, this function
4003 * starts an ATA command. ATA commands are grouped into
4004 * classes called "protocols", and issuing each type of protocol
4005 * is slightly different.
4007 * May be used as the qc_issue() entry in ata_port_operations.
4010 * spin_lock_irqsave(host_set lock)
4013 * Zero on success, AC_ERR_* mask on failure
4016 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4018 struct ata_port *ap = qc->ap;
4020 ata_dev_select(ap, qc->dev->devno, 1, 0);
/* each protocol differs in taskfile load, bmdma use, and polling */
4022 switch (qc->tf.protocol) {
4023 case ATA_PROT_NODATA:
4024 ata_tf_to_host(ap, &qc->tf);
4028 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4029 ap->ops->bmdma_setup(qc); /* set up bmdma */
4030 ap->ops->bmdma_start(qc); /* initiate bmdma */
4033 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4034 ata_qc_set_polling(qc);
4035 ata_tf_to_host(ap, &qc->tf);
4036 ap->hsm_task_state = HSM_ST;
4037 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4040 case ATA_PROT_ATAPI:
4041 ata_qc_set_polling(qc);
4042 ata_tf_to_host(ap, &qc->tf);
4043 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
/* NOINTR holds off the irq handler until the CDB has been sent */
4046 case ATA_PROT_ATAPI_NODATA:
4047 ap->flags |= ATA_FLAG_NOINTR;
4048 ata_tf_to_host(ap, &qc->tf);
4049 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4052 case ATA_PROT_ATAPI_DMA:
4053 ap->flags |= ATA_FLAG_NOINTR;
4054 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4055 ap->ops->bmdma_setup(qc); /* set up bmdma */
4056 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
/* unknown protocol: refuse to issue */
4061 return AC_ERR_SYSTEM;
4068 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
4069 * @qc: Info associated with this ATA transaction.
4072 * spin_lock_irqsave(host_set lock)
4075 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
4077 struct ata_port *ap = qc->ap;
4078 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4080 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4082 /* load PRD table addr. */
4083 mb(); /* make sure PRD table writes are visible to controller */
4084 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
4086 /* specify data direction, triple-check start bit is clear */
4087 dmactl = readb(mmio + ATA_DMA_CMD);
4088 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4090 dmactl |= ATA_DMA_WR;
4091 writeb(dmactl, mmio + ATA_DMA_CMD);
4093 /* issue r/w command */
4094 ap->ops->exec_command(ap, &qc->tf);
4098 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
4099 * @qc: Info associated with this ATA transaction.
4102 * spin_lock_irqsave(host_set lock)
4105 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
4107 struct ata_port *ap = qc->ap;
4108 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4111 /* start host DMA transaction */
/* read-modify-write: only the START bit is set, other bits preserved */
4112 dmactl = readb(mmio + ATA_DMA_CMD);
4113 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
4115 /* Strictly, one may wish to issue a readb() here, to
4116 * flush the mmio write. However, control also passes
4117 * to the hardware at this point, and it will interrupt
4118 * us when we are to resume control. So, in effect,
4119 * we don't care when the mmio write flushes.
4120 * Further, a read of the DMA status register _immediately_
4121 * following the write may not be what certain flaky hardware
4122 * is expected, so I think it is best to not add a readb()
4123 * without first all the MMIO ATA cards/mobos.
4124 * Or maybe I'm just being paranoid.
4129 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
4130 * @qc: Info associated with this ATA transaction.
4133 * spin_lock_irqsave(host_set lock)
4136 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
4138 struct ata_port *ap = qc->ap;
4139 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4142 /* load PRD table addr. */
4143 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
4145 /* specify data direction, triple-check start bit is clear */
4146 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4147 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* ATA_DMA_WR set for writes only; port-I/O twin of the MMIO variant */
4149 dmactl |= ATA_DMA_WR;
4150 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4152 /* issue r/w command */
4153 ap->ops->exec_command(ap, &qc->tf);
4157 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4158 * @qc: Info associated with this ATA transaction.
4161 * spin_lock_irqsave(host_set lock)
4164 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4166 struct ata_port *ap = qc->ap;
4169 /* start host DMA transaction */
/* read-modify-write of the command register: set START, keep the rest */
4170 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4171 outb(dmactl | ATA_DMA_START,
4172 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4177 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4178 * @qc: Info associated with this ATA transaction.
4180 * Writes the ATA_DMA_START flag to the DMA command register.
4182 * May be used as the bmdma_start() entry in ata_port_operations.
4185 * spin_lock_irqsave(host_set lock)
4187 void ata_bmdma_start(struct ata_queued_cmd *qc)
/* dispatch to the MMIO or port-I/O implementation */
4189 if (qc->ap->flags & ATA_FLAG_MMIO)
4190 ata_bmdma_start_mmio(qc);
4192 ata_bmdma_start_pio(qc);
4197 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4198 * @qc: Info associated with this ATA transaction.
4200 * Writes address of PRD table to device's PRD Table Address
4201 * register, sets the DMA control register, and calls
4202 * ops->exec_command() to start the transfer.
4204 * May be used as the bmdma_setup() entry in ata_port_operations.
4207 * spin_lock_irqsave(host_set lock)
4209 void ata_bmdma_setup(struct ata_queued_cmd *qc)
/* dispatch to the MMIO or port-I/O implementation */
4211 if (qc->ap->flags & ATA_FLAG_MMIO)
4212 ata_bmdma_setup_mmio(qc);
4214 ata_bmdma_setup_pio(qc);
4219 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
4220 * @ap: Port associated with this ATA transaction.
4222 * Clear interrupt and error flags in DMA status register.
4224 * May be used as the irq_clear() entry in ata_port_operations.
4227 * spin_lock_irqsave(host_set lock)
4230 void ata_bmdma_irq_clear(struct ata_port *ap)
/* nothing to clear if the port has no BMDMA registers */
4232 if (!ap->ioaddr.bmdma_addr)
4235 if (ap->flags & ATA_FLAG_MMIO) {
4236 void __iomem *mmio =
4237 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
/* writing the status value back clears its write-1-to-clear bits */
4238 writeb(readb(mmio), mmio);
4240 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4241 outb(inb(addr), addr);
4247 * ata_bmdma_status - Read PCI IDE BMDMA status
4248 * @ap: Port associated with this ATA transaction.
4250 * Read and return BMDMA status register.
4252 * May be used as the bmdma_status() entry in ata_port_operations.
4255 * spin_lock_irqsave(host_set lock)
4258 u8 ata_bmdma_status(struct ata_port *ap)
/* read via MMIO or port I/O depending on how the port is mapped */
4261 if (ap->flags & ATA_FLAG_MMIO) {
4262 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4263 host_stat = readb(mmio + ATA_DMA_STATUS);
4265 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4271 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
4272 * @qc: Command we are ending DMA for
4274 * Clears the ATA_DMA_START flag in the dma control register
4276 * May be used as the bmdma_stop() entry in ata_port_operations.
4279 * spin_lock_irqsave(host_set lock)
4282 void ata_bmdma_stop(struct ata_queued_cmd *qc)
4284 struct ata_port *ap = qc->ap;
4285 if (ap->flags & ATA_FLAG_MMIO) {
4286 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4288 /* clear start/stop bit */
4289 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
4290 mmio + ATA_DMA_CMD);
4292 /* clear start/stop bit */
4293 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
4294 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4297 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
4298 ata_altstatus(ap); /* dummy read */
4302 * ata_host_intr - Handle host interrupt for given (port, task)
4303 * @ap: Port on which interrupt arrived (possibly...)
4304 * @qc: Taskfile currently active in engine
4306 * Handle host interrupt for given queued command. Currently,
4307 * only DMA interrupts are handled. All other commands are
4308 * handled via polling with interrupts disabled (nIEN bit).
4311 * spin_lock_irqsave(host_set lock)
4314 * One if interrupt was handled, zero if not (shared irq).
4317 inline unsigned int ata_host_intr (struct ata_port *ap,
4318 struct ata_queued_cmd *qc)
4320 u8 status, host_stat;
/* Interrupt handling depends on the active command's protocol:
 * DMA protocols must consult and quiesce the BMDMA engine first,
 * NODATA protocols go straight to the taskfile status check. */
4322 switch (qc->tf.protocol) {
4325 case ATA_PROT_ATAPI_DMA:
4326 case ATA_PROT_ATAPI:
4327 /* check status of DMA engine */
4328 host_stat = ap->ops->bmdma_status(ap);
4329 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4331 /* if it's not our irq... */
4332 if (!(host_stat & ATA_DMA_INTR))
4335 /* before we do anything else, clear DMA-Start bit */
4336 ap->ops->bmdma_stop(qc);
4340 case ATA_PROT_ATAPI_NODATA:
4341 case ATA_PROT_NODATA:
4342 /* check altstatus */
/* altstatus read does NOT clear INTRQ; it is a cheap pre-check
 * so a busy device is detected without eating the interrupt. */
4343 status = ata_altstatus(ap);
4344 if (status & ATA_BUSY)
4347 /* check main status, clearing INTRQ */
4348 status = ata_chk_status(ap);
4349 if (unlikely(status & ATA_BUSY))
4351 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4352 ap->id, qc->tf.protocol, status);
4354 /* ack bmdma irq events */
4355 ap->ops->irq_clear(ap);
4357 /* complete taskfile transaction */
/* Fold the device status bits into the qc's error mask before
 * completion so upper layers see any ERR/DF condition. */
4358 qc->err_mask |= ac_err_mask(status);
4359 ata_qc_complete(qc);
4366 return 1; /* irq handled */
/* Fall-through for interrupts we did not claim (shared IRQ line):
 * count them, and periodically ack/log as a stuck-irq debug aid. */
4369 ap->stats.idle_irq++;
4372 if ((ap->stats.idle_irq % 1000) == 0) {
4373 ata_irq_ack(ap, 0); /* debug trap */
4374 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4378 return 0; /* irq not handled */
4382 * ata_interrupt - Default ATA host interrupt handler
4383 * @irq: irq line (unused)
4384 * @dev_instance: pointer to our ata_host_set information structure
4387 * Default interrupt handler for PCI IDE devices. Calls
4388 * ata_host_intr() for each port that is not disabled.
4391 * Obtains host_set lock during operation.
4394 * IRQ_NONE or IRQ_HANDLED.
4397 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4399 struct ata_host_set *host_set = dev_instance;
4401 unsigned int handled = 0;
4402 unsigned long flags;
4404 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4405 spin_lock_irqsave(&host_set->lock, flags);
/* Poll every port on this host set; the IRQ line may be shared
 * between channels, so each eligible port gets a chance to claim it. */
4407 for (i = 0; i < host_set->n_ports; i++) {
4408 struct ata_port *ap;
4410 ap = host_set->ports[i];
4412 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4413 struct ata_queued_cmd *qc;
4415 qc = ata_qc_from_tag(ap, ap->active_tag);
/* Only dispatch when a command is active and interrupts were not
 * masked via nIEN (polled commands keep nIEN set). */
4416 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4417 (qc->flags & ATA_QCFLAG_ACTIVE))
4418 handled |= ata_host_intr(ap, qc);
4422 spin_unlock_irqrestore(&host_set->lock, flags);
4424 return IRQ_RETVAL(handled);
4429 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4430 * without filling any other registers
4432 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4435 struct ata_taskfile tf;
/* Build a minimal no-data taskfile carrying only the opcode and
 * issue it synchronously through the internal-command path. */
4438 ata_tf_init(ap, &tf, dev->devno);
4441 tf.flags |= ATA_TFLAG_DEVICE;
4442 tf.protocol = ATA_PROT_NODATA;
4444 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4446 printk(KERN_ERR "%s: ata command failed: %d\n",
4452 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
/* Skip devices whose IDENTIFY data says a cache flush is unneeded
 * or unsupported (ata_try_flush_cache). */
4456 if (!ata_try_flush_cache(dev))
/* Prefer the 48-bit FLUSH CACHE EXT opcode when the drive
 * advertises it; fall back to the 28-bit FLUSH CACHE. */
4459 if (ata_id_has_flush_ext(dev->id))
4460 cmd = ATA_CMD_FLUSH_EXT;
4462 cmd = ATA_CMD_FLUSH;
4464 return ata_do_simple_cmd(ap, dev, cmd);
/* Put the drive into standby (spin down) via STANDBY IMMEDIATE. */
4467 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4469 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
/* Wake the drive back up via IDLE IMMEDIATE. */
4472 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4474 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4478 * ata_device_resume - wakeup a previously suspended devices
4479 * @ap: port the device is connected to
4480 * @dev: the device to resume
4482 * Kick the drive back into action, by sending it an idle immediate
4483 * command and making sure its transfer mode matches between drive
4487 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
/* Clear the suspended flag first so subsequent port operations see
 * an active port. */
4489 if (ap->flags & ATA_FLAG_SUSPENDED) {
4490 ap->flags &= ~ATA_FLAG_SUSPENDED;
4493 if (!ata_dev_present(dev))
/* Only ATA disks need the explicit IDLE IMMEDIATE wakeup. */
4495 if (dev->class == ATA_DEV_ATA)
4496 ata_start_drive(ap, dev);
4502 * ata_device_suspend - prepare a device for suspend
4503 * @ap: port the device is connected to
4504 * @dev: the device to suspend
4506 * Flush the cache on the drive, if appropriate, then issue a
4507 * standbynow command.
4509 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4511 if (!ata_dev_present(dev))
/* Flush write cache on ATA disks before spinning them down, so no
 * dirty data is lost across the power transition. */
4513 if (dev->class == ATA_DEV_ATA)
4514 ata_flush_cache(ap, dev)
4516 ata_standby_drive(ap, dev);
4517 ap->flags |= ATA_FLAG_SUSPENDED;
4522 * ata_port_start - Set port up for dma.
4523 * @ap: Port to initialize
4525 * Called just after data structures for each port are
4526 * initialized. Allocates space for PRD table.
4528 * May be used as the port_start() entry in ata_port_operations.
4531 * Inherited from caller.
4534 int ata_port_start (struct ata_port *ap)
4536 struct device *dev = ap->host_set->dev;
/* The PRD (Physical Region Descriptor) table must be DMA-coherent:
 * the BMDMA engine reads it directly from memory. */
4539 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4543 rc = ata_pad_alloc(ap, dev);
/* On pad-buffer allocation failure, undo the PRD allocation. */
4545 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4549 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4556 * ata_port_stop - Undo ata_port_start()
4557 * @ap: Port to shut down
4559 * Frees the PRD table.
4561 * May be used as the port_stop() entry in ata_port_operations.
4564 * Inherited from caller.
4567 void ata_port_stop (struct ata_port *ap)
4569 struct device *dev = ap->host_set->dev;
/* Release resources in reverse of ata_port_start(): PRD table,
 * then the DMA pad buffer. */
4571 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4572 ata_pad_free(ap, dev);
/* Default host_stop hook: unmap the MMIO region if one was mapped. */
4575 void ata_host_stop (struct ata_host_set *host_set)
4577 if (host_set->mmio_base)
4578 iounmap(host_set->mmio_base);
4583 * ata_host_remove - Unregister SCSI host structure with upper layers
4584 * @ap: Port to unregister
4585 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4588 * Inherited from caller.
4591 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4593 struct Scsi_Host *sh = ap->host;
/* Detach from the SCSI midlayer only on full unregister; the
 * port_stop hook is invoked in either case. */
4598 scsi_remove_host(sh);
4600 ap->ops->port_stop(ap);
4604 * ata_host_init - Initialize an ata_port structure
4605 * @ap: Structure to initialize
4606 * @host: associated SCSI mid-layer structure
4607 * @host_set: Collection of hosts to which @ap belongs
4608 * @ent: Probe information provided by low-level driver
4609 * @port_no: Port number associated with this ata_port
4611 * Initialize a new ata_port structure, and its associated
4615 * Inherited from caller.
4618 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4619 struct ata_host_set *host_set,
4620 const struct ata_probe_ent *ent, unsigned int port_no)
/* Seed the SCSI midlayer host first... */
4626 host->max_channel = 1;
4627 host->unique_id = ata_unique_id++;
4628 host->max_cmd_len = 12;
/* ...then populate the ata_port from the probe entry.  The port
 * starts life disabled until the bus probe succeeds. */
4630 ap->flags = ATA_FLAG_PORT_DISABLED;
4631 ap->id = host->unique_id;
4633 ap->ctl = ATA_DEVCTL_OBS;
4634 ap->host_set = host_set;
4635 ap->port_no = port_no;
/* Legacy-mode controllers use fixed hardware port numbering. */
4637 ent->legacy_mode ? ent->hard_port_no : port_no;
4638 ap->pio_mask = ent->pio_mask;
4639 ap->mwdma_mask = ent->mwdma_mask;
4640 ap->udma_mask = ent->udma_mask;
4641 ap->flags |= ent->host_flags;
4642 ap->ops = ent->port_ops;
4643 ap->cbl = ATA_CBL_NONE;
/* Poison the tag so stale qc lookups fail loudly; 0xFF forces the
 * first ctl write to actually hit hardware (cached-value mismatch). */
4644 ap->active_tag = ATA_TAG_POISON;
4645 ap->last_ctl = 0xFF;
4647 INIT_WORK(&ap->port_task, NULL, NULL);
4648 INIT_LIST_HEAD(&ap->eh_done_q);
4650 for (i = 0; i < ATA_MAX_DEVICES; i++)
4651 ap->device[i].devno = i;
4654 ap->stats.unhandled_irq = 1;
4655 ap->stats.idle_irq = 1;
4658 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4662 * ata_host_add - Attach low-level ATA driver to system
4663 * @ent: Information provided by low-level driver
4664 * @host_set: Collections of ports to which we add
4665 * @port_no: Port number associated with this host
4667 * Attach low-level ATA driver to system.
4670 * PCI/etc. bus probe sem.
4673 * New ata_port on success, for NULL on error.
4676 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4677 struct ata_host_set *host_set,
4678 unsigned int port_no)
4680 struct Scsi_Host *host;
4681 struct ata_port *ap;
/* The ata_port lives in the Scsi_Host's hostdata area, so one
 * allocation covers both objects. */
4685 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4689 host->transportt = &ata_scsi_transport_template;
4691 ap = (struct ata_port *) &host->hostdata[0];
4693 ata_host_init(ap, host, host_set, ent, port_no);
/* Give the LLD a chance to allocate per-port resources (PRD etc.);
 * on failure the Scsi_Host is released. */
4695 rc = ap->ops->port_start(ap);
4702 scsi_host_put(host);
4707 * ata_device_add - Register hardware device with ATA and SCSI layers
4708 * @ent: Probe information describing hardware device to be registered
4710 * This function processes the information provided in the probe
4711 * information struct @ent, allocates the necessary ATA and SCSI
4712 * host information structures, initializes them, and registers
4713 * everything with requisite kernel subsystems.
4715 * This function requests irqs, probes the ATA bus, and probes
4719 * PCI/etc. bus probe sem.
4722 * Number of ports registered. Zero on error (no ports registered).
4725 int ata_device_add(const struct ata_probe_ent *ent)
4727 unsigned int count = 0, i;
4728 struct device *dev = ent->dev;
4729 struct ata_host_set *host_set;
4732 /* alloc a container for our list of ATA ports (buses) */
/* host_set has a flexible tail of n_ports pointers; allocate both
 * in one kzalloc so the memory comes back zeroed. */
4733 host_set = kzalloc(sizeof(struct ata_host_set) +
4734 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4737 spin_lock_init(&host_set->lock);
4739 host_set->dev = dev;
4740 host_set->n_ports = ent->n_ports;
4741 host_set->irq = ent->irq;
4742 host_set->mmio_base = ent->mmio_base;
4743 host_set->private_data = ent->private_data;
4744 host_set->ops = ent->port_ops;
4746 /* register each port bound to this device */
4747 for (i = 0; i < ent->n_ports; i++) {
4748 struct ata_port *ap;
4749 unsigned long xfer_mode_mask;
4751 ap = ata_host_add(ent, host_set, i);
4755 host_set->ports[i] = ap;
/* Combine the three transfer-mode masks into the single bitfield
 * layout used by ata_mode_string() for the dmesg banner. */
4756 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
4757 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4758 (ap->pio_mask << ATA_SHIFT_PIO);
4760 /* print per-port info to dmesg */
4761 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4762 "bmdma 0x%lX irq %lu\n",
4764 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4765 ata_mode_string(xfer_mode_mask),
4766 ap->ioaddr.cmd_addr,
4767 ap->ioaddr.ctl_addr,
4768 ap->ioaddr.bmdma_addr,
/* Ack any latent interrupt before the IRQ handler is installed. */
4772 host_set->ops->irq_clear(ap);
4779 /* obtain irq, that is shared between channels */
4780 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4781 DRV_NAME, host_set))
4784 /* perform each probe synchronously */
4785 DPRINTK("probe begin\n");
4786 for (i = 0; i < count; i++) {
4787 struct ata_port *ap;
4790 ap = host_set->ports[i];
4792 DPRINTK("ata%u: bus probe begin\n", ap->id);
4793 rc = ata_bus_probe(ap);
4794 DPRINTK("ata%u: bus probe end\n", ap->id);
4797 /* FIXME: do something useful here?
4798 * Current libata behavior will
4799 * tear down everything when
4800 * the module is removed
4801 * or the h/w is unplugged.
4805 rc = scsi_add_host(ap->host, dev);
4807 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4809 /* FIXME: do something useful here */
4810 /* FIXME: handle unconditional calls to
4811 * scsi_scan_host and ata_host_remove, below,
4817 /* probes are done, now scan each port's disk(s) */
4818 DPRINTK("host probe begin\n");
4819 for (i = 0; i < count; i++) {
4820 struct ata_port *ap = host_set->ports[i];
4822 ata_scsi_scan_host(ap);
/* Success: stash host_set so ata_pci_remove_one() can find it. */
4825 dev_set_drvdata(dev, host_set);
4827 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4828 return ent->n_ports; /* success */
/* Error path: tear down every port added so far. */
4831 for (i = 0; i < count; i++) {
4832 ata_host_remove(host_set->ports[i], 1);
4833 scsi_host_put(host_set->ports[i]->host);
4837 VPRINTK("EXIT, returning 0\n");
4842 * ata_host_set_remove - PCI layer callback for device removal
4843 * @host_set: ATA host set that was removed
4845 * Unregister all objects associated with this host set. Free those
4849 * Inherited from calling layer (may sleep).
4852 void ata_host_set_remove(struct ata_host_set *host_set)
4854 struct ata_port *ap;
/* Pass 1: detach every port from the SCSI midlayer while the IRQ
 * handler is still installed. */
4857 for (i = 0; i < host_set->n_ports; i++) {
4858 ap = host_set->ports[i];
4859 scsi_remove_host(ap->host);
4862 free_irq(host_set->irq, host_set);
/* Pass 2: with the IRQ gone, release per-port resources. */
4864 for (i = 0; i < host_set->n_ports; i++) {
4865 ap = host_set->ports[i];
4867 ata_scsi_release(ap->host);
/* Legacy-mode ports reserved the classic ISA I/O ranges at probe
 * time; give them back. */
4869 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4870 struct ata_ioports *ioaddr = &ap->ioaddr;
4872 if (ioaddr->cmd_addr == 0x1f0)
4873 release_region(0x1f0, 8);
4874 else if (ioaddr->cmd_addr == 0x170)
4875 release_region(0x170, 8);
4878 scsi_host_put(ap->host);
4881 if (host_set->ops->host_stop)
4882 host_set->ops->host_stop(host_set);
4888 * ata_scsi_release - SCSI layer callback hook for host unload
4889 * @host: libata host to be unloaded
4891 * Performs all duties necessary to shut down a libata port...
4892 * Kill port kthread, disable port, and release resources.
4895 * Inherited from SCSI layer.
4901 int ata_scsi_release(struct Scsi_Host *host)
4903 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4908 ap->ops->port_disable(ap);
4909 ata_host_remove(ap, 0);
/* Free each device's cached IDENTIFY data page. */
4910 for (i = 0; i < ATA_MAX_DEVICES; i++)
4911 kfree(ap->device[i].id);
4918 * ata_std_ports - initialize ioaddr with standard port offsets.
4919 * @ioaddr: IO address structure to be initialized
4921 * Utility function which initializes data_addr, error_addr,
4922 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4923 * device_addr, status_addr, and command_addr to standard offsets
4924 * relative to cmd_addr.
4926 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4929 void ata_std_ports(struct ata_ioports *ioaddr)
/* Standard taskfile layout: each register sits at a fixed offset
 * from the command block base address. */
4931 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4932 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4933 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4934 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4935 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4936 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4937 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4938 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4939 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4940 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/* PCI variant of host_stop: unmap MMIO via the managed PCI helper. */
4946 void ata_pci_host_stop (struct ata_host_set *host_set)
4948 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4950 pci_iounmap(pdev, host_set->mmio_base);
4954 * ata_pci_remove_one - PCI layer callback for device removal
4955 * @pdev: PCI device that was removed
4957 * PCI layer indicates to libata via this hook that
4958 * hot-unplug or module unload event has occurred.
4959 * Handle this by unregistering all objects associated
4960 * with this PCI device. Free those objects. Then finally
4961 * release PCI resources and disable device.
4964 * Inherited from PCI layer (may sleep).
4967 void ata_pci_remove_one (struct pci_dev *pdev)
4969 struct device *dev = pci_dev_to_dev(pdev);
/* host_set was stashed in drvdata by ata_device_add(). */
4970 struct ata_host_set *host_set = dev_get_drvdata(dev);
4972 ata_host_set_remove(host_set);
4973 pci_release_regions(pdev);
4974 pci_disable_device(pdev);
4975 dev_set_drvdata(dev, NULL);
4978 /* move to PCI subsystem */
/* Read a config-space register of the width given by @bits and test
 * whether the masked value equals the expected value.  Returns 1 on
 * match, 0 otherwise. */
4979 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4981 unsigned long tmp = 0;
4983 switch (bits->width) {
4986 pci_read_config_byte(pdev, bits->reg, &tmp8);
4992 pci_read_config_word(pdev, bits->reg, &tmp16);
4998 pci_read_config_dword(pdev, bits->reg, &tmp32);
5009 return (tmp == bits->val) ? 1 : 0;
/* Generic PCI suspend hook: save config space, disable the device,
 * and drop it into D3hot. */
5012 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5014 pci_save_state(pdev);
5015 pci_disable_device(pdev);
5016 pci_set_power_state(pdev, PCI_D3hot);
/* Generic PCI resume hook: restore power state and config space,
 * re-enable the device, and re-assert bus mastering for BMDMA. */
5020 int ata_pci_device_resume(struct pci_dev *pdev)
5022 pci_set_power_state(pdev, PCI_D0);
5023 pci_restore_state(pdev);
5024 pci_enable_device(pdev);
5025 pci_set_master(pdev);
5028 #endif /* CONFIG_PCI */
/* Module init: create the shared workqueue used for port tasks. */
5031 static int __init ata_init(void)
5033 ata_wq = create_workqueue("ata");
5037 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
/* Module exit: tear down the workqueue created in ata_init(). */
5041 static void __exit ata_exit(void)
5043 destroy_workqueue(ata_wq);
5046 module_init(ata_init);
5047 module_exit(ata_exit);
/* Shared state for ata_ratelimit(); the spinlock serializes readers
 * and the single writer of ratelimit_time. */
5049 static unsigned long ratelimit_time;
5050 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
/* Simple global rate limiter: permits at most one event per HZ/5
 * jiffies (5 per second), used to throttle log spew. */
5052 int ata_ratelimit(void)
5055 unsigned long flags;
5057 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5059 if (time_after(jiffies, ratelimit_time)) {
5061 ratelimit_time = jiffies + (HZ/5);
5065 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5071 * libata is essentially a library of internal helper functions for
5072 * low-level ATA host controller drivers. As such, the API/ABI is
5073 * likely to change as new drivers are added and updated.
5074 * Do not depend on ABI/API stability.
/* Core library entry points exported to low-level drivers. */
5077 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5078 EXPORT_SYMBOL_GPL(ata_std_ports);
5079 EXPORT_SYMBOL_GPL(ata_device_add);
5080 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5081 EXPORT_SYMBOL_GPL(ata_sg_init);
5082 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5083 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5084 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5085 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5086 EXPORT_SYMBOL_GPL(ata_tf_load);
5087 EXPORT_SYMBOL_GPL(ata_tf_read);
5088 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5089 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5090 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5091 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5092 EXPORT_SYMBOL_GPL(ata_check_status);
5093 EXPORT_SYMBOL_GPL(ata_altstatus);
5094 EXPORT_SYMBOL_GPL(ata_exec_command);
5095 EXPORT_SYMBOL_GPL(ata_port_start);
5096 EXPORT_SYMBOL_GPL(ata_port_stop);
5097 EXPORT_SYMBOL_GPL(ata_host_stop);
5098 EXPORT_SYMBOL_GPL(ata_interrupt);
5099 EXPORT_SYMBOL_GPL(ata_qc_prep);
5100 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA helpers usable directly as ata_port_operations entries. */
5101 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5102 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5103 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5104 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5105 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
/* Probe / reset machinery. */
5106 EXPORT_SYMBOL_GPL(ata_port_probe);
5107 EXPORT_SYMBOL_GPL(sata_phy_reset);
5108 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5109 EXPORT_SYMBOL_GPL(ata_bus_reset);
5110 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5111 EXPORT_SYMBOL_GPL(ata_std_softreset);
5112 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5113 EXPORT_SYMBOL_GPL(ata_std_postreset);
5114 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5115 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5116 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5117 EXPORT_SYMBOL_GPL(ata_port_disable);
5118 EXPORT_SYMBOL_GPL(ata_ratelimit);
5119 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5120 EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI midlayer glue. */
5121 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5122 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5123 EXPORT_SYMBOL_GPL(ata_scsi_error);
5124 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5125 EXPORT_SYMBOL_GPL(ata_scsi_release);
5126 EXPORT_SYMBOL_GPL(ata_host_intr);
5127 EXPORT_SYMBOL_GPL(ata_dev_classify);
5128 EXPORT_SYMBOL_GPL(ata_id_string);
5129 EXPORT_SYMBOL_GPL(ata_id_c_string);
5130 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5131 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5132 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
/* Timing computation helpers. */
5134 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5135 EXPORT_SYMBOL_GPL(ata_timing_compute);
5136 EXPORT_SYMBOL_GPL(ata_timing_merge);
/* PCI-specific exports (compiled only with CONFIG_PCI). */
5139 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5140 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5141 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5142 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5143 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5144 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5145 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5146 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5147 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5148 #endif /* CONFIG_PCI */
/* Power-management entry points. */
5150 EXPORT_SYMBOL_GPL(ata_device_suspend);
5151 EXPORT_SYMBOL_GPL(ata_device_resume);
5152 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5153 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);