/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
/* Forward declarations for file-local helpers defined later in the file. */
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

/* Starts at 1 so 0 can mean "unassigned"; presumably handed to each new
 * port as its id — TODO confirm against ata_host_init(). */
70 static unsigned int ata_unique_id = 1;
/* Workqueue shared by all ports; used by ata_port_queue_task() below. */
71 static struct workqueue_struct *ata_wq;
73 int atapi_enabled = 1;
74 module_param(atapi_enabled, int, 0444);
75 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
78 module_param_named(fua, libata_fua, int, 0444);
79 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
81 MODULE_AUTHOR("Jeff Garzik");
82 MODULE_DESCRIPTION("Library module for ATA devices");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_VERSION);
88 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
89 * @tf: Taskfile to convert
90 * @fis: Buffer into which data will output
91 * @pmp: Port multiplier port
93 * Converts a standard ATA taskfile to a Serial ATA
94 * FIS structure (Register - Host to Device).
97 * Inherited from caller.
100 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
102 fis[0] = 0x27; /* Register - Host to Device FIS */
103 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
104 bit 7 indicates Command FIS */
105 fis[2] = tf->command;
106 fis[3] = tf->feature;
113 fis[8] = tf->hob_lbal;
114 fis[9] = tf->hob_lbam;
115 fis[10] = tf->hob_lbah;
116 fis[11] = tf->hob_feature;
119 fis[13] = tf->hob_nsect;
130 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
131 * @fis: Buffer from which data will be input
132 * @tf: Taskfile to output
134 * Converts a serial ATA FIS structure to a standard ATA taskfile.
137 * Inherited from caller.
140 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
142 tf->command = fis[2]; /* status */
143 tf->feature = fis[3]; /* error */
150 tf->hob_lbal = fis[8];
151 tf->hob_lbam = fis[9];
152 tf->hob_lbah = fis[10];
155 tf->hob_nsect = fis[13];
158 static const u8 ata_rw_cmds[] = {
162 ATA_CMD_READ_MULTI_EXT,
163 ATA_CMD_WRITE_MULTI_EXT,
167 ATA_CMD_WRITE_MULTI_FUA_EXT,
171 ATA_CMD_PIO_READ_EXT,
172 ATA_CMD_PIO_WRITE_EXT,
185 ATA_CMD_WRITE_FUA_EXT
189 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
190 * @qc: command to examine and configure
192 * Examine the device configuration and tf->flags to calculate
193 * the proper read/write commands and protocol to use.
198 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
200 struct ata_taskfile *tf = &qc->tf;
201 struct ata_device *dev = qc->dev;
204 int index, fua, lba48, write;
206 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
207 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
208 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
210 if (dev->flags & ATA_DFLAG_PIO) {
211 tf->protocol = ATA_PROT_PIO;
212 index = dev->multi_count ? 0 : 8;
213 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
214 /* Unable to use DMA due to host limitation */
215 tf->protocol = ATA_PROT_PIO;
216 index = dev->multi_count ? 0 : 8;
218 tf->protocol = ATA_PROT_DMA;
222 cmd = ata_rw_cmds[index + fua + lba48 + write];
231 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
232 * @pio_mask: pio_mask
233 * @mwdma_mask: mwdma_mask
234 * @udma_mask: udma_mask
236 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
237 * unsigned int xfer_mask.
245 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
246 unsigned int mwdma_mask,
247 unsigned int udma_mask)
249 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
250 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
251 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
255 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
256 * @xfer_mask: xfer_mask to unpack
257 * @pio_mask: resulting pio_mask
258 * @mwdma_mask: resulting mwdma_mask
259 * @udma_mask: resulting udma_mask
261 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
262 * Any NULL destination masks will be ignored.
264 static void ata_unpack_xfermask(unsigned int xfer_mask,
265 unsigned int *pio_mask,
266 unsigned int *mwdma_mask,
267 unsigned int *udma_mask)
270 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
272 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
274 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
277 static const struct ata_xfer_ent {
278 unsigned int shift, bits;
281 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
282 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
283 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
288 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
289 * @xfer_mask: xfer_mask of interest
291 * Return matching XFER_* value for @xfer_mask. Only the highest
292 * bit of @xfer_mask is considered.
298 * Matching XFER_* value, 0 if no match found.
300 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
302 int highbit = fls(xfer_mask) - 1;
303 const struct ata_xfer_ent *ent;
305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
306 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
307 return ent->base + highbit - ent->shift;
312 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
313 * @xfer_mode: XFER_* of interest
315 * Return matching xfer_mask for @xfer_mode.
321 * Matching xfer_mask, 0 if no match found.
323 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
325 const struct ata_xfer_ent *ent;
327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return 1 << (ent->shift + xfer_mode - ent->base);
334 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
335 * @xfer_mode: XFER_* of interest
337 * Return matching xfer_shift for @xfer_mode.
343 * Matching xfer_shift, -1 if no match found.
345 static int ata_xfer_mode2shift(unsigned int xfer_mode)
347 const struct ata_xfer_ent *ent;
349 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
350 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
356 * ata_mode_string - convert xfer_mask to string
357 * @xfer_mask: mask of bits supported; only highest bit counts.
359 * Determine string which represents the highest speed
360 * (highest bit in @modemask).
366 * Constant C string representing highest speed listed in
367 * @mode_mask, or the constant C string "<n/a>".
/* Return the mode-name string for the highest bit set in @xfer_mask,
 * or "<n/a>" when no bit maps into the table (per kerneldoc above).
 * Table entries are ordered by bit position: PIO0-4, MWDMA0-2, UDMA. */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}
398 * ata_pio_devchk - PATA device presence detection
399 * @ap: ATA channel to examine
400 * @device: Device to examine (starting at zero)
402 * This technique was originally described in
403 * Hale Landis's ATADRVR (www.ata-atapi.com), and
404 * later found its way into the ATA/ATAPI spec.
406 * Write a pattern to the ATA shadow registers,
407 * and if a device is present, it will respond by
408 * correctly storing and echoing back the
409 * ATA shadow register contents.
415 static unsigned int ata_pio_devchk(struct ata_port *ap,
418 struct ata_ioports *ioaddr = &ap->ioaddr;
421 ap->ops->dev_select(ap, device);
423 outb(0x55, ioaddr->nsect_addr);
424 outb(0xaa, ioaddr->lbal_addr);
426 outb(0xaa, ioaddr->nsect_addr);
427 outb(0x55, ioaddr->lbal_addr);
429 outb(0x55, ioaddr->nsect_addr);
430 outb(0xaa, ioaddr->lbal_addr);
432 nsect = inb(ioaddr->nsect_addr);
433 lbal = inb(ioaddr->lbal_addr);
435 if ((nsect == 0x55) && (lbal == 0xaa))
436 return 1; /* we found a device */
438 return 0; /* nothing found */
442 * ata_mmio_devchk - PATA device presence detection
443 * @ap: ATA channel to examine
444 * @device: Device to examine (starting at zero)
446 * This technique was originally described in
447 * Hale Landis's ATADRVR (www.ata-atapi.com), and
448 * later found its way into the ATA/ATAPI spec.
450 * Write a pattern to the ATA shadow registers,
451 * and if a device is present, it will respond by
452 * correctly storing and echoing back the
453 * ATA shadow register contents.
459 static unsigned int ata_mmio_devchk(struct ata_port *ap,
462 struct ata_ioports *ioaddr = &ap->ioaddr;
465 ap->ops->dev_select(ap, device);
467 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
468 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
470 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
471 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
473 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
474 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
476 nsect = readb((void __iomem *) ioaddr->nsect_addr);
477 lbal = readb((void __iomem *) ioaddr->lbal_addr);
479 if ((nsect == 0x55) && (lbal == 0xaa))
480 return 1; /* we found a device */
482 return 0; /* nothing found */
486 * ata_devchk - PATA device presence detection
487 * @ap: ATA channel to examine
488 * @device: Device to examine (starting at zero)
490 * Dispatch ATA device presence detection, depending
491 * on whether we are using PIO or MMIO to talk to the
492 * ATA shadow registers.
498 static unsigned int ata_devchk(struct ata_port *ap,
501 if (ap->flags & ATA_FLAG_MMIO)
502 return ata_mmio_devchk(ap, device);
503 return ata_pio_devchk(ap, device);
507 * ata_dev_classify - determine device type based on ATA-spec signature
508 * @tf: ATA taskfile register set for device to be identified
510 * Determine from taskfile register contents whether a device is
511 * ATA or ATAPI, as per "Signature and persistence" section
512 * of ATA/PI spec (volume 1, sect 5.14).
518 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
519 * in the event of failure.
522 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
524 /* Apple's open source Darwin code hints that some devices only
525 * put a proper signature into the LBA mid/high registers,
526 * So, we only check those. It's sufficient for uniqueness.
529 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
530 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
531 DPRINTK("found ATA device by sig\n");
535 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
536 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
537 DPRINTK("found ATAPI device by sig\n");
538 return ATA_DEV_ATAPI;
541 DPRINTK("unknown device\n");
542 return ATA_DEV_UNKNOWN;
546 * ata_dev_try_classify - Parse returned ATA device signature
547 * @ap: ATA channel to examine
548 * @device: Device to examine (starting at zero)
549 * @r_err: Value of error register on completion
551 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
552 * an ATA/ATAPI-defined set of values is placed in the ATA
553 * shadow registers, indicating the results of device detection
556 * Select the ATA device, and read the values from the ATA shadow
557 * registers. Then parse according to the Error register value,
558 * and the spec-defined values examined by ata_dev_classify().
564 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
568 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
570 struct ata_taskfile tf;
574 ap->ops->dev_select(ap, device);
576 memset(&tf, 0, sizeof(tf));
578 ap->ops->tf_read(ap, &tf);
583 /* see if device passed diags */
586 else if ((device == 0) && (err == 0x81))
591 /* determine if device is ATA or ATAPI */
592 class = ata_dev_classify(&tf);
594 if (class == ATA_DEV_UNKNOWN)
596 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
602 * ata_id_string - Convert IDENTIFY DEVICE page into string
603 * @id: IDENTIFY DEVICE results we will examine
604 * @s: string into which data is output
605 * @ofs: offset into identify device page
606 * @len: length of string to return. must be an even number.
608 * The strings in the IDENTIFY DEVICE page are broken up into
609 * 16-bit chunks. Run through the string, and output each
610 * 8-bit chunk linearly, regardless of platform.
616 void ata_id_string(const u16 *id, unsigned char *s,
617 unsigned int ofs, unsigned int len)
636 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
637 * @id: IDENTIFY DEVICE results we will examine
638 * @s: string into which data is output
639 * @ofs: offset into identify device page
640 * @len: length of string to return. must be an odd number.
642 * This function is identical to ata_id_string except that it
643 * trims trailing spaces and terminates the resulting string with
644 * null. @len must be actual maximum length (even number) + 1.
649 void ata_id_c_string(const u16 *id, unsigned char *s,
650 unsigned int ofs, unsigned int len)
656 ata_id_string(id, s, ofs, len - 1);
658 p = s + strnlen(s, len - 1);
659 while (p > s && p[-1] == ' ')
664 static u64 ata_id_n_sectors(const u16 *id)
666 if (ata_id_has_lba(id)) {
667 if (ata_id_has_lba48(id))
668 return ata_id_u64(id, 100);
670 return ata_id_u32(id, 60);
672 if (ata_id_current_chs_valid(id))
673 return ata_id_u32(id, 57);
675 return id[1] * id[3] * id[6];
680 * ata_noop_dev_select - Select device 0/1 on ATA bus
681 * @ap: ATA channel to manipulate
682 * @device: ATA device (numbered from zero) to select
684 * This function performs no actual function.
686 * May be used as the dev_select() entry in ata_port_operations.
691 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
697 * ata_std_dev_select - Select device 0/1 on ATA bus
698 * @ap: ATA channel to manipulate
699 * @device: ATA device (numbered from zero) to select
701 * Use the method defined in the ATA specification to
702 * make either device 0, or device 1, active on the
703 * ATA channel. Works with both PIO and MMIO.
705 * May be used as the dev_select() entry in ata_port_operations.
711 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
716 tmp = ATA_DEVICE_OBS;
718 tmp = ATA_DEVICE_OBS | ATA_DEV1;
720 if (ap->flags & ATA_FLAG_MMIO) {
721 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
723 outb(tmp, ap->ioaddr.device_addr);
725 ata_pause(ap); /* needed; also flushes, for mmio */
729 * ata_dev_select - Select device 0/1 on ATA bus
730 * @ap: ATA channel to manipulate
731 * @device: ATA device (numbered from zero) to select
732 * @wait: non-zero to wait for Status register BSY bit to clear
733 * @can_sleep: non-zero if context allows sleeping
735 * Use the method defined in the ATA specification to
736 * make either device 0, or device 1, active on the
739 * This is a high-level version of ata_std_dev_select(),
740 * which additionally provides the services of inserting
741 * the proper pauses and status polling, where needed.
747 void ata_dev_select(struct ata_port *ap, unsigned int device,
748 unsigned int wait, unsigned int can_sleep)
750 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
751 ap->id, device, wait);
756 ap->ops->dev_select(ap, device);
759 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
766 * ata_dump_id - IDENTIFY DEVICE info debugging output
767 * @id: IDENTIFY DEVICE page to dump
769 * Dump selected 16-bit words from the given IDENTIFY DEVICE
776 static inline void ata_dump_id(const u16 *id)
778 DPRINTK("49==0x%04x "
788 DPRINTK("80==0x%04x "
798 DPRINTK("88==0x%04x "
805 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
806 * @id: IDENTIFY data to compute xfer mask from
808 * Compute the xfermask for this device. This is not as trivial
809 * as it seems if we must consider early devices correctly.
811 * FIXME: pre IDE drive timing (do we care ?).
819 static unsigned int ata_id_xfermask(const u16 *id)
821 unsigned int pio_mask, mwdma_mask, udma_mask;
823 /* Usual case. Word 53 indicates word 64 is valid */
824 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
825 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
829 /* If word 64 isn't valid then Word 51 high byte holds
830 * the PIO timing number for the maximum. Turn it into
833 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
835 /* But wait.. there's more. Design your standards by
836 * committee and you too can get a free iordy field to
837 * process. However its the speeds not the modes that
838 * are supported... Note drivers using the timing API
839 * will get this right anyway
843 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
846 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
847 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
849 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
853 * ata_port_queue_task - Queue port_task
854 * @ap: The ata_port to queue port_task for
856 * Schedule @fn(@data) for execution after @delay jiffies using
857 * port_task. There is one port_task per port and it's the
858 * user(low level driver)'s responsibility to make sure that only
859 * one task is active at any given time.
861 * libata core layer takes care of synchronization between
862 * port_task and EH. ata_port_queue_task() may be ignored for EH
866 * Inherited from caller.
868 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
873 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
876 PREPARE_WORK(&ap->port_task, fn, data);
879 rc = queue_work(ata_wq, &ap->port_task);
881 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
883 /* rc == 0 means that another user is using port task */
888 * ata_port_flush_task - Flush port_task
889 * @ap: The ata_port to flush port_task for
891 * After this function completes, port_task is guranteed not to
892 * be running or scheduled.
895 * Kernel thread context (may sleep)
897 void ata_port_flush_task(struct ata_port *ap)
903 spin_lock_irqsave(&ap->host_set->lock, flags);
904 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
905 spin_unlock_irqrestore(&ap->host_set->lock, flags);
907 DPRINTK("flush #1\n");
908 flush_workqueue(ata_wq);
911 * At this point, if a task is running, it's guaranteed to see
912 * the FLUSH flag; thus, it will never queue pio tasks again.
915 if (!cancel_delayed_work(&ap->port_task)) {
916 DPRINTK("flush #2\n");
917 flush_workqueue(ata_wq);
920 spin_lock_irqsave(&ap->host_set->lock, flags);
921 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
922 spin_unlock_irqrestore(&ap->host_set->lock, flags);
927 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
929 struct completion *waiting = qc->private_data;
931 qc->ap->ops->tf_read(qc->ap, &qc->tf);
936 * ata_exec_internal - execute libata internal command
937 * @ap: Port to which the command is sent
938 * @dev: Device to which the command is sent
939 * @tf: Taskfile registers for the command and the result
940 * @dma_dir: Data tranfer direction of the command
941 * @buf: Data buffer of the command
942 * @buflen: Length of data buffer
944 * Executes libata internal command with timeout. @tf contains
945 * command on entry and result on return. Timeout and error
946 * conditions are reported via return value. No recovery action
947 * is taken after a command times out. It's caller's duty to
948 * clean up after timeout.
951 * None. Should be called with kernel context, might sleep.
/* NOTE(review): interior lines of this function are missing from this
 * chunk (see the gaps in the embedded numbering) — locals such as
 * `flags`, qc setup (tf copy), the wait-timeout cleanup tail and the
 * qc free/return path are absent.  Do not compile as-is; restore the
 * missing lines from upstream.  Comments below annotate only what is
 * visible here. */
955 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
956 struct ata_taskfile *tf,
957 int dma_dir, void *buf, unsigned int buflen)
959 u8 command = tf->command;
960 struct ata_queued_cmd *qc;
961 DECLARE_COMPLETION(wait);
963 unsigned int err_mask;
/* qc allocation and setup happen under the host_set lock */
965 spin_lock_irqsave(&ap->host_set->lock, flags);
967 qc = ata_qc_new_init(ap, dev);
971 qc->dma_dir = dma_dir;
972 if (dma_dir != DMA_NONE) {
973 ata_sg_init_one(qc, buf, buflen);
974 qc->nsect = buflen / ATA_SECT_SIZE;
/* completion callback wakes the on-stack completion above */
977 qc->private_data = &wait;
978 qc->complete_fn = ata_qc_complete_internal;
980 qc->err_mask = ata_qc_issue(qc);
984 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* wait with timeout; on timeout, flush the port task then re-take the
 * lock to decide who completes the qc */
986 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
987 ata_port_flush_task(ap);
989 spin_lock_irqsave(&ap->host_set->lock, flags);
991 /* We're racing with irq here. If we lose, the
992 * following test prevents us from completing the qc
993 * again. If completion irq occurs after here but
994 * before the caller cleans up, it will result in a
995 * spurious interrupt. We can live with that.
997 if (qc->flags & ATA_QCFLAG_ACTIVE) {
998 qc->err_mask = AC_ERR_TIMEOUT;
1000 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1004 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1008 err_mask = qc->err_mask;
1012 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1013 * Until those drivers are fixed, we detect the condition
1014 * here, fail the command with AC_ERR_SYSTEM and reenable the
1017 * Note that this doesn't change any behavior as internal
1018 * command failure results in disabling the device in the
1019 * higher layer for LLDDs without new reset/EH callbacks.
1021 * Kill the following code as soon as those drivers are fixed.
1023 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1024 err_mask |= AC_ERR_SYSTEM;
1032 * ata_pio_need_iordy - check if iordy needed
1035 * Check if the current speed of the device requires IORDY. Used
1036 * by various controllers for chip configuration.
1039 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1042 int speed = adev->pio_mode - XFER_PIO_0;
1049 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1051 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1052 pio = adev->id[ATA_ID_EIDE_PIO];
1053 /* Is the speed faster than the drive allows non IORDY ? */
1055 /* This is cycle times not frequency - watch the logic! */
1056 if (pio > 240) /* PIO2 is 240nS per cycle */
1065 * ata_dev_read_id - Read ID data from the specified device
1066 * @ap: port on which target device resides
1067 * @dev: target device
1068 * @p_class: pointer to class of the target device (may be changed)
1069 * @post_reset: is this read ID post-reset?
1070 * @p_id: read IDENTIFY page (newly allocated)
1072 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1073 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1074 * devices. This function also takes care of EDD signature
1075 * misreporting (to be removed once EDD support is gone) and
1076 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1079 * Kernel thread context (may sleep)
1082 * 0 on success, -errno otherwise.
/* NOTE(review): interior lines of this function are missing from this
 * chunk — locals (`id`, `reason`, `rc`), several branch bodies, the
 * `retry:` label, the success path storing *p_class/*p_id, and the
 * err_out cleanup (kfree).  Do not compile as-is; restore from
 * upstream.  Comments annotate only what is visible. */
1084 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1085 unsigned int *p_class, int post_reset, u16 **p_id)
1087 unsigned int class = *p_class;
1088 unsigned int using_edd;
1089 struct ata_taskfile tf;
1090 unsigned int err_mask = 0;
1095 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
/* decide whether the preceding reset used SRST (vs legacy EDD) */
1097 if (ap->ops->probe_reset ||
1098 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1103 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1105 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1108 reason = "out of memory";
1113 ata_tf_init(ap, &tf, dev->devno);
/* IDENTIFY DEVICE for ATA, IDENTIFY PACKET DEVICE for ATAPI */
1117 tf.command = ATA_CMD_ID_ATA;
1120 tf.command = ATA_CMD_ID_ATAPI;
1124 reason = "unsupported class";
1128 tf.protocol = ATA_PROT_PIO;
1130 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1131 id, sizeof(id[0]) * ATA_ID_WORDS);
1135 reason = "I/O error";
1137 if (err_mask & ~AC_ERR_DEV)
1141 * arg! EDD works for all test cases, but seems to return
1142 * the ATA signature for some ATAPI devices. Until the
1143 * reason for this is found and fixed, we fix up the mess
1144 * here. If IDENTIFY DEVICE returns command aborted
1145 * (as ATAPI devices do), then we issue an
1146 * IDENTIFY PACKET DEVICE.
1148 * ATA software reset (SRST, the default) does not appear
1149 * to have this problem.
1151 if ((using_edd) && (class == ATA_DEV_ATA)) {
1152 u8 err = tf.feature;
1153 if (err & ATA_ABORTED) {
1154 class = ATA_DEV_ATAPI;
/* IDENTIFY data is little-endian on the wire */
1161 swap_buf_le16(id, ATA_ID_WORDS);
1164 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1166 reason = "device reports illegal type";
1170 if (post_reset && class == ATA_DEV_ATA) {
1172 * The exact sequence expected by certain pre-ATA4 drives is:
1175 * INITIALIZE DEVICE PARAMETERS
1177 * Some drives were very specific about that exact sequence.
1179 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1180 err_mask = ata_dev_init_params(ap, dev);
1183 reason = "INIT_DEV_PARAMS failed";
1187 /* current CHS translation info (id[53-58]) might be
1188 * changed. reread the identify device info.
1200 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1201 ap->id, dev->devno, reason);
1206 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1207 struct ata_device *dev)
1209 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1213 * ata_dev_configure - Configure the specified ATA/ATAPI device
1214 * @ap: Port on which target device resides
1215 * @dev: Target device to configure
1216 * @print_info: Enable device info printout
1218 * Configure @dev according to @dev->id. Generic and low-level
1219 * driver specific fixups are also applied.
1222 * Kernel thread context (may sleep)
1225 * 0 on success, -errno otherwise
/* NOTE(review): interior lines of this function are missing from this
 * chunk — the `print_info` parameter line, locals (`i`, `rc`), several
 * closing braces, the lba_desc assignments, `dev->heads = id[3];`, the
 * success `return 0;` and the `err_out_nosup:` error tail.  Do not
 * compile as-is; restore from upstream.  Comments annotate only what
 * is visible. */
1227 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1230 const u16 *id = dev->id;
1231 unsigned int xfer_mask;
1234 if (!ata_dev_present(dev)) {
1235 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1236 ap->id, dev->devno);
1240 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1242 /* print device capabilities */
1244 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1245 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1246 ap->id, dev->devno, id[49], id[82], id[83],
1247 id[84], id[85], id[86], id[87], id[88]);
1249 /* initialize to-be-configured parameters */
1251 dev->max_sectors = 0;
1259 * common ATA, ATAPI feature tests
1262 /* find max transfer mode; for printk only */
1263 xfer_mask = ata_id_xfermask(id);
1267 /* ATA-specific feature tests */
1268 if (dev->class == ATA_DEV_ATA) {
1269 dev->n_sectors = ata_id_n_sectors(id);
1271 if (ata_id_has_lba(id)) {
1272 const char *lba_desc;
1275 dev->flags |= ATA_DFLAG_LBA;
1276 if (ata_id_has_lba48(id)) {
1277 dev->flags |= ATA_DFLAG_LBA48;
1281 /* print device info to dmesg */
1283 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1284 "max %s, %Lu sectors: %s\n",
1286 ata_id_major_version(id),
1287 ata_mode_string(xfer_mask),
1288 (unsigned long long)dev->n_sectors,
/* CHS path: default geometry from words 1/3/6, overridden by the
 * current translation in words 54-56 when valid */
1293 /* Default translation */
1294 dev->cylinders = id[1];
1296 dev->sectors = id[6];
1298 if (ata_id_current_chs_valid(id)) {
1299 /* Current CHS translation is valid. */
1300 dev->cylinders = id[54];
1301 dev->heads = id[55];
1302 dev->sectors = id[56];
1305 /* print device info to dmesg */
1307 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1308 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1310 ata_id_major_version(id),
1311 ata_mode_string(xfer_mask),
1312 (unsigned long long)dev->n_sectors,
1313 dev->cylinders, dev->heads, dev->sectors);
1319 /* ATAPI-specific feature tests */
1320 else if (dev->class == ATA_DEV_ATAPI) {
1321 rc = atapi_cdb_len(id);
1322 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1323 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1327 dev->cdb_len = (unsigned int) rc;
1329 /* print device info to dmesg */
1331 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1332 ap->id, dev->devno, ata_mode_string(xfer_mask));
/* SCSI host's max_cmd_len is the max CDB length over both devices */
1335 ap->host->max_cmd_len = 0;
1336 for (i = 0; i < ATA_MAX_DEVICES; i++)
1337 ap->host->max_cmd_len = max_t(unsigned int,
1338 ap->host->max_cmd_len,
1339 ap->device[i].cdb_len);
1341 /* limit bridge transfers to udma5, 200 sectors */
1342 if (ata_dev_knobble(ap, dev)) {
1344 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1345 ap->id, dev->devno);
1346 dev->udma_mask &= ATA_UDMA5;
1347 dev->max_sectors = ATA_MAX_SECTORS;
1350 if (ap->ops->dev_config)
1351 ap->ops->dev_config(ap, dev);
1353 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1357 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1358 ap->id, dev->devno);
1359 DPRINTK("EXIT, err\n");
1364 * ata_bus_probe - Reset and probe ATA bus
1367 * Master ATA bus probing function. Initiates a hardware-dependent
1368 * bus reset, then attempts to identify any devices found on
1372 * PCI/etc. bus probe sem.
1375 * Zero on success, non-zero on error.
/* NOTE(review): interior lines of this function are missing from this
 * chunk — the probe_reset error `return rc;`, the `continue;`
 * statements in the device loop, the `found` accounting, the mode
 * configuration call and the success/error returns.  Do not compile
 * as-is; restore from upstream.  Comments annotate only what is
 * visible. */
1378 static int ata_bus_probe(struct ata_port *ap)
1380 unsigned int classes[ATA_MAX_DEVICES];
1381 unsigned int i, rc, found = 0;
1385 /* reset and determine device classes */
1386 for (i = 0; i < ATA_MAX_DEVICES; i++)
1387 classes[i] = ATA_DEV_UNKNOWN;
/* prefer the new probe_reset callback; fall back to legacy phy_reset */
1389 if (ap->ops->probe_reset) {
1390 rc = ap->ops->probe_reset(ap, classes);
1392 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1396 ap->ops->phy_reset(ap);
1398 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1399 for (i = 0; i < ATA_MAX_DEVICES; i++)
1400 classes[i] = ap->device[i].class;
/* anything still unknown after reset is treated as absent */
1405 for (i = 0; i < ATA_MAX_DEVICES; i++)
1406 if (classes[i] == ATA_DEV_UNKNOWN)
1407 classes[i] = ATA_DEV_NONE;
1409 /* read IDENTIFY page and configure devices */
1410 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1411 struct ata_device *dev = &ap->device[i];
1413 dev->class = classes[i];
1415 if (!ata_dev_present(dev))
1418 WARN_ON(dev->id != NULL);
1419 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1420 dev->class = ATA_DEV_NONE;
1424 if (ata_dev_configure(ap, dev, 1)) {
1425 dev->class++; /* disable device */
1433 goto err_out_disable;
1436 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1437 goto err_out_disable;
1442 ap->ops->port_disable(ap);
1447 * ata_port_probe - Mark port as enabled
1448 * @ap: Port for which we indicate enablement
1450 * Modify @ap data structure such that the system
1451 * thinks that the entire port is enabled.
1453 * LOCKING: host_set lock, or some other form of
1457 void ata_port_probe(struct ata_port *ap)
1459 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1463 * sata_print_link_status - Print SATA link status
1464 * @ap: SATA port to printk link status about
1466 * This function prints link speed and status of a SATA link.
1471 static void sata_print_link_status(struct ata_port *ap)
1476 if (!ap->ops->scr_read)
1479 sstatus = scr_read(ap, SCR_STATUS);
1481 if (sata_dev_present(ap)) {
1482 tmp = (sstatus >> 4) & 0xf;
1485 else if (tmp & (1 << 1))
1488 speed = "<unknown>";
1489 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1490 ap->id, speed, sstatus);
1492 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
/* NOTE(review): block appears truncated (missing do-loop opener, local
 * declarations and early returns) — confirm against upstream.
 */
void __sata_phy_reset(struct ata_port *ap)
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
		sstatus = scr_read(ap, SCR_STATUS);
		/* DET field (bits 3:0) == 1 means "device, no phy comm yet" */
		if ((sstatus & 0xf) != 1)
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);

	/* a responding device on a SATA phy implies a SATA cable */
	ap->cbl = ATA_CBL_SATA;
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
/* NOTE(review): the tail of this function (post-reset probing) appears
 * missing in this copy — confirm against upstream.
 */
void sata_phy_reset(struct ata_port *ap)
	__sata_phy_reset(ap);
	/* bail out if the phy reset left the port disabled */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
1571 * ata_port_disable - Disable port.
1572 * @ap: Port to be disabled.
1574 * Modify @ap data structure such that the system
1575 * thinks that the entire port is disabled, and should
1576 * never attempt to probe or communicate with devices
1579 * LOCKING: host_set lock, or some other form of
1583 void ata_port_disable(struct ata_port *ap)
1585 ap->device[0].class = ATA_DEV_NONE;
1586 ap->device[1].class = ATA_DEV_NONE;
1587 ap->flags |= ATA_FLAG_PORT_DISABLED;
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 *
 * Fields per entry: mode, setup, act8b, rec8b, cyc8b,
 * active, recover, cycle, udma.
 *
 * NOTE(review): ata_timing_find_mode() scans for a sentinel entry with
 * mode == 0xFF; the terminating "{ 0xFF }" row appears to be missing
 * from this copy, along with some PIO/UDMA rows — confirm upstream.
 */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
/* ENOUGH(v,unit): round v up to a whole number of units (ceil division).
 * EZ(v,unit):     same, but a value of 0 stays 0 ("not specified").
 * NOTE: both evaluate their arguments more than once — callers pass only
 * side-effect-free expressions.
 */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
1638 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1640 q->setup = EZ(t->setup * 1000, T);
1641 q->act8b = EZ(t->act8b * 1000, T);
1642 q->rec8b = EZ(t->rec8b * 1000, T);
1643 q->cyc8b = EZ(t->cyc8b * 1000, T);
1644 q->active = EZ(t->active * 1000, T);
1645 q->recover = EZ(t->recover * 1000, T);
1646 q->cycle = EZ(t->cycle * 1000, T);
1647 q->udma = EZ(t->udma * 1000, UT);
1650 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1651 struct ata_timing *m, unsigned int what)
1653 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1654 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1655 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1656 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1657 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1658 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1659 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1660 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1663 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1665 const struct ata_timing *t;
1667 for (t = ata_timing; t->mode != speed; t++)
1668 if (t->mode == 0xFF)
/* Compute the final (quantized) timing for @adev at transfer mode @speed
 * into @t, honoring any extended EIDE cycle timings the drive reports.
 * NOTE(review): block appears truncated (missing error returns and some
 * closing braces) — confirm against upstream.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
	const struct ata_timing *s;
	struct ata_timing p;

	/* unknown mode — nothing we can compute */
	if (!(s = ata_timing_find_mode(speed)))

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
/* Program @dev's transfer mode into the device (SET FEATURES - XFER) and
 * revalidate it; disables the whole port on failure.
 * NOTE(review): block appears truncated (missing return/braces) —
 * confirm against upstream.
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))

	/* remember that PIO-only devices need the PIO flag */
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
/* Program host-side PIO timings for every present device on @ap.
 * NOTE(review): block appears truncated (missing loop variable, continue
 * and return statements) — confirm against upstream.
 */
static int ata_host_set_pio(struct ata_port *ap)
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))

		/* a present device without any PIO mode is an error */
		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
/* Program host-side DMA timings for every present device on @ap that
 * has a DMA mode selected.
 * NOTE(review): block appears truncated (missing loop variable/braces) —
 * confirm against upstream.
 */
static void ata_host_set_dma(struct ata_port *ap)
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		/* skip absent devices and devices without a DMA mode */
		if (!ata_dev_present(dev) || !dev->dma_mode)

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
/* NOTE(review): block appears truncated (missing declarations, continue
 * statements, error-path label) — confirm against upstream.
 */
static void ata_set_mode(struct ata_port *ap)
	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_present(dev))

		ata_dev_xfermask(ap, dev);

		/* TODO: let LLDD filter dev->*_mask here */

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		/* pick the fastest mode out of each mask */
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)

	/* give the LLDD a chance to react to the new modes */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	ata_port_disable(ap);
1859 * ata_tf_to_host - issue ATA taskfile to host controller
1860 * @ap: port to which command is being issued
1861 * @tf: ATA taskfile register set
1863 * Issues ATA taskfile register set to ATA host controller,
1864 * with proper synchronization with interrupt handler and
1868 * spin_lock_irqsave(host_set lock)
1871 static inline void ata_tf_to_host(struct ata_port *ap,
1872 const struct ata_taskfile *tf)
1874 ap->ops->tf_load(ap, tf);
1875 ap->ops->exec_command(ap, tf);
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 */
/* NOTE(review): block appears truncated (missing status declaration,
 * sleep calls inside the poll loops and the return statements) —
 * confirm against upstream.
 */
unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
	unsigned long timer_start, timeout;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	/* phase 1: poll quickly until the "impatience" timeout */
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		status = ata_busy_wait(ap, ATA_BUSY, 3);

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	/* phase 2: keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		status = ata_chk_status(ap);

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
/* Post-reset housekeeping: wait for the device(s) found in ata_devchk()
 * to come out of BSY, then re-select device 0.
 * NOTE(review): block appears truncated (missing dev0/dev1 guards,
 * local variables and some braces) — confirm against upstream.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	ap->ops->dev_select(ap, 1);
	if (ap->flags & ATA_FLAG_MMIO) {
		nsect = readb((void __iomem *) ioaddr->nsect_addr);
		lbal = readb((void __iomem *) ioaddr->lbal_addr);
		nsect = inb(ioaddr->nsect_addr);
		lbal = inb(ioaddr->lbal_addr);
	/* nsect/lbal == 1/1 is the post-reset signature: registers live */
	if ((nsect == 1) && (lbal == 1))
	if (time_after(jiffies, timeout)) {
	msleep(50);	/* give drive a breather */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	ap->ops->dev_select(ap, 1);
	ap->ops->dev_select(ap, 0);
/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 */
/* NOTE(review): block appears truncated (missing the post-command sleep
 * and opening brace) — confirm against upstream.
 */
static unsigned int ata_bus_edd(struct ata_port *ap)
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* issue under host_set lock to synchronize with the IRQ handler */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* Perform an ATA software reset (SRST) on @ap, then run post-reset
 * processing for the devices in @devmask.
 * NOTE(review): block appears truncated (missing the 150ms msleep, the
 * final return and some braces) — confirm against upstream.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */

	/* Before we perform post reset processing we want to see if
	   the bus shows 0xFF because the odd clown forgets the D7 pulldown
	 */

	if (ata_check_status(ap) == 0xFF)
		return 1;	/* Positive is failure for some reason */

	ata_bus_post_reset(ap, devmask);
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
/* NOTE(review): block appears truncated (missing err declaration, error
 * label, some guards and returns) — confirm against upstream.
 */
void ata_bus_reset(struct ata_port *ap)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
	dev0 = ata_devchk(ap, 0);
	dev1 = ata_devchk(ap, 1);

	devmask |= (1 << 0);
	devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means device 1 failed diagnostics — skip it */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
			outb(ap->ctl, ioaddr->ctl_addr);

	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);
/* Bring the SATA phy out of reset (DET = 0 in SControl) and poll up to
 * 5 seconds for the link to come up.
 * NOTE(review): block appears truncated (missing sstatus declaration,
 * the do/msleep poll body and the return statements) — confirm upstream.
 */
static int sata_phy_resume(struct ata_port *ap)
	unsigned long timeout = jiffies + (HZ * 5);

	scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* Wait for phy to become ready, if necessary. */
		sstatus = scr_read(ap, SCR_STATUS);
		/* DET != 1 means the phy left the "no communication" state */
		if ((sstatus & 0xf) != 1)
	} while (time_before(jiffies, timeout));
2176 * ata_std_probeinit - initialize probing
2177 * @ap: port to be probed
2179 * @ap is about to be probed. Initialize it. This function is
2180 * to be used as standard callback for ata_drive_probe_reset().
2182 * NOTE!!! Do not use this function as probeinit if a low level
2183 * driver implements only hardreset. Just pass NULL as probeinit
2184 * in that case. Using this function is probably okay but doing
2185 * so makes reset sequence different from the original
2186 * ->phy_reset implementation and Jeff nervous. :-P
2188 extern void ata_std_probeinit(struct ata_port *ap)
2190 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2191 sata_phy_resume(ap);
2192 if (sata_dev_present(ap))
2193 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.  This function is to be used
 *	as standard callback for ata_drive_*_reset() functions.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
/* NOTE(review): block appears truncated (missing err declaration,
 * ENTER/EXIT tracing and return statements) — confirm against upstream.
 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;

	/* SATA port with no attached device: nothing to reset */
	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
		printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
		DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means device 1 failed diagnostics */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *	This function is to be used as standard callback for
 *	ata_drive_*_reset().
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
/* NOTE(review): block appears truncated (missing msleep(1), returns and
 * ENTER tracing) — confirm against upstream.
 */
int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
	/* Issue phy wake/reset: DET = 1 holds the link in reset */
	scr_write_flush(ap, SCR_CONTROL, 0x301);

	/*
	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */

	/* Bring phy back */
	sata_phy_resume(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!sata_dev_present(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		printk(KERN_ERR "ata%u: COMRESET failed "
		       "(device not ready)\n", ap->id);
		DPRINTK("EXIT, device not ready\n");

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	This function is to be used as standard callback for
 *	ata_drive_*_reset().
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
/* NOTE(review): block appears truncated (missing ENTER/EXIT tracing and
 * interrupt re-enable call) — confirm against upstream.
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
	/* set cable type if it isn't already set */
	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* print link status */
	if (ap->cbl == ATA_CBL_SATA)
		sata_print_link_status(ap);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");

	/* set up device control */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
			outb(ap->ctl, ap->ioaddr.ctl_addr);
2368 * ata_std_probe_reset - standard probe reset method
2369 * @ap: prot to perform probe-reset
2370 * @classes: resulting classes of attached devices
2372 * The stock off-the-shelf ->probe_reset method.
2375 * Kernel thread context (may sleep)
2378 * 0 on success, -errno otherwise.
2380 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2382 ata_reset_fn_t hardreset;
2385 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2386 hardreset = sata_std_hardreset;
2388 return ata_drive_probe_reset(ap, ata_std_probeinit,
2389 ata_std_softreset, hardreset,
2390 ata_std_postreset, classes);
/* Run one reset method and normalize the resulting classes[]; helper for
 * ata_drive_probe_reset().
 * NOTE(review): block appears truncated (missing declarations, an early
 * return after reset() and some braces) — confirm against upstream.
 */
static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
			  ata_postreset_fn_t postreset,
			  unsigned int *classes)
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	rc = reset(ap, 0, classes);

	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
	 * is complete and convert all ATA_DEV_UNKNOWN to
	 * ATA_DEV_NONE.
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] != ATA_DEV_UNKNOWN)

	if (i < ATA_MAX_DEVICES)
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			if (classes[i] == ATA_DEV_UNKNOWN)
				classes[i] = ATA_DEV_NONE;

	postreset(ap, classes);

	/* still-unknown device 0 means classification failed */
	return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
/**
 *	ata_drive_probe_reset - Perform probe reset with given methods
 *	@ap: port to reset
 *	@probeinit: probeinit method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@classes: resulting classes of attached devices
 *
 *	Reset the specified port and classify attached devices using
 *	given methods.  This function prefers softreset but tries all
 *	possible reset sequences to reset and classify devices.  This
 *	function is intended to be used for constructing ->probe_reset
 *	callback by low level drivers.
 *
 *	Reset methods should follow the following rules.
 *
 *	- Return 0 on sucess, -errno on failure.
 *	- If classification is supported, fill classes[] with
 *	  recognized class codes.
 *	- If classification is not supported, leave classes[] alone.
 *	- If verbose is non-zero, print error message on failure;
 *	  otherwise, shut up.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -EINVAL if no reset method is avaliable, -ENODEV
 *	if classification fails, and any error code from reset
 *	methods.
 */
/* NOTE(review): block appears truncated (missing rc declaration, the
 * probeinit call, method guards and returns) — confirm against upstream.
 */
int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset, unsigned int *classes)
	/* try softreset first */
	rc = do_probe_reset(ap, softreset, postreset, classes);

	/* then hardreset */
	rc = do_probe_reset(ap, hardreset, postreset, classes);
	if (rc == 0 || rc != -ENODEV)

	/* hardreset couldn't classify — follow up with softreset */
	rc = do_probe_reset(ap, softreset, postreset, classes);
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@ap: port on which the device to compare against resides
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
/* NOTE(review): block appears truncated (missing new_n_sectors
 * declaration, the "return 0" statements and the final "return 1") —
 * confirm against upstream.
 */
static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
			       unsigned int new_class, const u16 *new_id)
	const u16 *old_id = dev->id;
	unsigned char model[2][41], serial[2][21];

	if (dev->class != new_class) {
		"ata%u: dev %u class mismatch %d != %d\n",
		ap->id, dev->devno, dev->class, new_class);

	/* extract printable model/serial strings from both ID pages */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		"ata%u: dev %u model number mismatch '%s' != '%s'\n",
		ap->id, dev->devno, model[0], model[1]);

	if (strcmp(serial[0], serial[1])) {
		"ata%u: dev %u serial number mismatch '%s' != '%s'\n",
		ap->id, dev->devno, serial[0], serial[1]);

	/* capacity is only meaningful for ATA disks */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		"ata%u: dev %u n_sectors mismatch %llu != %llu\n",
		ap->id, dev->devno, (unsigned long long)dev->n_sectors,
		(unsigned long long)new_n_sectors);
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@ap: port on which the device to revalidate resides
 *	@dev: device to revalidate
 *	@post_reset: is this revalidation after reset?
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
/* NOTE(review): block appears truncated (missing local declarations,
 * the error label and cleanup of the old/new id buffers) — confirm
 * against upstream.
 */
int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
	if (!ata_dev_present(dev))

	/* allocate & read ID data */
	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);

	/* is the device still there? */
	if (!ata_dev_same_device(ap, dev, class, id)) {

	/* configure device according to the new ID */
	return ata_dev_configure(ap, dev, 0);

	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
	       ap->id, dev->devno, rc);
/* Devices with broken DMA.  Entries come in (model, firmware-revision)
 * pairs; a NULL revision blacklists every revision of that model.
 * Consumed by ata_dma_blacklisted(), which iterates in steps of 2, so
 * the entry count must stay even.
 * NOTE(review): some entries appear to be missing from this copy —
 * confirm against upstream.
 */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B",  NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
/**
 *	ata_strim - trim trailing blanks from an ATA ID string, in place
 *	@s: buffer holding the string (need not be NUL-terminated)
 *	@len: size of @s
 *
 *	ATAPI specifies that empty space is blank-filled; remove the
 *	trailing blanks and NUL-terminate the result.
 *
 *	RETURNS:
 *	Length of the trimmed string.
 */
/* Fix: the visible body was truncated — the loop never shortened the
 * string and the function never returned a value.  Restored the trim
 * statements and the return.
 */
static int ata_strim(char *s, size_t len)
{
	len = strnlen(s, len);

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}
	return len;
}
2644 static int ata_dma_blacklisted(const struct ata_device *dev)
2646 unsigned char model_num[40];
2647 unsigned char model_rev[16];
2648 unsigned int nlen, rlen;
2651 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2653 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2655 nlen = ata_strim(model_num, sizeof(model_num));
2656 rlen = ata_strim(model_rev, sizeof(model_rev));
2658 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2659 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2660 if (ata_dma_blacklist[i+1] == NULL)
2662 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@ap: Port on which the device to compute xfermask for resides
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 */
/* NOTE(review): block appears truncated (missing loop variable,
 * continue statement and some braces) — confirm against upstream.
 */
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
	unsigned long xfer_mask;

	/* start from the port-wide capabilities */
	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,

	/* use port-wide xfermask for now */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *d = &ap->device[i];
		if (!ata_dev_present(d))
		/* intersect with each present device's abilities */
		xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
		xfer_mask &= ata_id_xfermask(d->id);
		if (ata_dma_blacklisted(d))
			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);

	if (ata_dma_blacklisted(dev))
		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
		       "disabling DMA\n", ap->id, dev->devno);

	/* write the combined result back into @dev's masks */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@ap: Port associated with device @dev
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
/* NOTE(review): block appears truncated (missing opening/closing braces
 * and exit tracing) — confirm against upstream.
 */
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
	struct ata_taskfile tf;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* the requested transfer mode goes into the sector count reg */
	tf.nsect = dev->xfer_mode;

	if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
		printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
		ata_port_disable(ap);
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@ap: Port associated with device @dev
 *	@dev: Device to which command will be sent
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
/* NOTE(review): block appears truncated (missing the early-return for
 * out-of-range geometry, the sector-count assignment and the final
 * return) — confirm against upstream.
 */
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev)
	struct ata_taskfile tf;
	unsigned int err_mask;
	/* words 6 and 3 of IDENTIFY: default CHS geometry */
	u16 sectors = dev->id[6];
	u16 heads = dev->id[3];

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
/* NOTE(review): block appears truncated (missing some branch structure
 * around the single-entry unmap path) — confirm against upstream.
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		struct scatterlist *psg = &qc->pad_sgent;
		/* copy the padded tail back to the caller's pages */
		void *addr = kmap_atomic(psg->page, KM_IRQ0);
		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
		kunmap_atomic(addr, KM_IRQ0);
		dma_unmap_single(ap->host_set->dev,
				 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
		sg->length += qc->pad_len;
		memcpy(qc->buf_virt + sg->length - qc->pad_len,
		       pad_buf, qc->pad_len);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
2845 * ata_fill_sg - Fill PCI IDE PRD table
2846 * @qc: Metadata associated with taskfile to be transferred
2848 * Fill PCI IDE PRD (scatter-gather) table with segments
2849 * associated with the current disk command.
2852 * spin_lock_irqsave(host_set lock)
/* Each sg element is split so that no PRD entry crosses a 64KB physical
 * boundary (a PCI IDE hardware restriction); the final entry is flagged
 * with ATA_PRD_EOT to terminate the table.
 */
2855 static void ata_fill_sg(struct ata_queued_cmd *qc)
2857 struct ata_port *ap = qc->ap;
2858 struct scatterlist *sg;
2861 WARN_ON(qc->__sg == NULL);
2862 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2865 ata_for_each_sg(sg, qc) {
2869 /* determine if physical DMA addr spans 64K boundary.
2870 * Note h/w doesn't support 64-bit, so we unconditionally
2871 * truncate dma_addr_t to u32.
2873 addr = (u32) sg_dma_address(sg);
2874 sg_len = sg_dma_len(sg);
2877 offset = addr & 0xffff;
2879 if ((offset + sg_len) > 0x10000)
2880 len = 0x10000 - offset;
2882 ap->prd[idx].addr = cpu_to_le32(addr);
2883 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2884 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
/* mark the last PRD entry as end-of-table */
2893 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2896 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2897 * @qc: Metadata associated with taskfile to check
2899 * Allow low-level driver to filter ATA PACKET commands, returning
2900 * a status indicating whether or not it is OK to use DMA for the
2901 * supplied PACKET command.
2904 * spin_lock_irqsave(host_set lock)
2906 * RETURNS: 0 when ATAPI DMA can be used
/* Pure delegation: if the LLD supplies a check_atapi_dma hook its verdict
 * is used, otherwise DMA is allowed (rc stays 0). */
2909 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2911 struct ata_port *ap = qc->ap;
2912 int rc = 0; /* Assume ATAPI DMA is OK by default */
2914 if (ap->ops->check_atapi_dma)
2915 rc = ap->ops->check_atapi_dma(qc);
2920 * ata_qc_prep - Prepare taskfile for submission
2921 * @qc: Metadata associated with taskfile to be prepared
2923 * Prepare ATA taskfile for submission.
2926 * spin_lock_irqsave(host_set lock)
/* Default qc_prep hook: only DMA-mapped commands need the PRD table
 * built; non-DMA commands fall straight through. */
2928 void ata_qc_prep(struct ata_queued_cmd *qc)
2930 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/* ata_noop_qc_prep - qc_prep hook for controllers that need no PRD setup. */
2936 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2939 * ata_sg_init_one - Associate command with memory buffer
2940 * @qc: Command to be associated
2941 * @buf: Memory buffer
2942 * @buflen: Length of memory buffer, in bytes.
2944 * Initialize the data-related elements of queued_cmd @qc
2945 * to point to a single memory buffer, @buf of byte length @buflen.
2948 * spin_lock_irqsave(host_set lock)
/* Wraps @buf in the qc's embedded single-entry scatterlist (qc->sgent)
 * and marks the command ATA_QCFLAG_SINGLE so setup/clean use the
 * single-buffer DMA paths. */
2951 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2953 struct scatterlist *sg;
2955 qc->flags |= ATA_QCFLAG_SINGLE;
2957 memset(&qc->sgent, 0, sizeof(qc->sgent));
2958 qc->__sg = &qc->sgent;
2960 qc->orig_n_elem = 1;
2964 sg_init_one(sg, buf, buflen);
2968 * ata_sg_init - Associate command with scatter-gather table.
2969 * @qc: Command to be associated
2970 * @sg: Scatter-gather table.
2971 * @n_elem: Number of elements in s/g table.
2973 * Initialize the data-related elements of queued_cmd @qc
2974 * to point to a scatter-gather table @sg, containing @n_elem
2978 * spin_lock_irqsave(host_set lock)
/* Counterpart of ata_sg_init_one() for multi-element transfers; sets
 * ATA_QCFLAG_SG and remembers the original element count so padding in
 * ata_sg_setup()/ata_sg_clean() can be undone. */
2981 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2982 unsigned int n_elem)
2984 qc->flags |= ATA_QCFLAG_SG;
2986 qc->n_elem = n_elem;
2987 qc->orig_n_elem = n_elem;
2991 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2992 * @qc: Command with memory buffer to be mapped.
2994 * DMA-map the memory buffer associated with queued_cmd @qc.
2997 * spin_lock_irqsave(host_set lock)
3000 * Zero on success, negative on error.
/* If the transfer length is not a multiple of 4, the trailing 1-3 bytes
 * are redirected through the port's per-tag pad buffer (pad_sgent) so the
 * DMA engine always sees 32-bit-aligned lengths.  Writes pre-copy the
 * tail into the pad buffer; reads are copied back in ata_sg_clean(). */
3003 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3005 struct ata_port *ap = qc->ap;
3006 int dir = qc->dma_dir;
3007 struct scatterlist *sg = qc->__sg;
3008 dma_addr_t dma_address;
3011 /* we must lengthen transfers to end on a 32-bit boundary */
3012 qc->pad_len = sg->length & 3;
3014 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3015 struct scatterlist *psg = &qc->pad_sgent;
3017 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3019 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3021 if (qc->tf.flags & ATA_TFLAG_WRITE)
3022 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3025 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3026 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim the real buffer so pad bytes are not transferred twice */
3028 sg->length -= qc->pad_len;
3029 if (sg->length == 0)
3032 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3033 sg->length, qc->pad_len);
3041 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3043 if (dma_mapping_error(dma_address)) {
/* mapping failed: restore the original length before bailing out */
3045 sg->length += qc->pad_len;
3049 sg_dma_address(sg) = dma_address;
3050 sg_dma_len(sg) = sg->length;
3053 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3054 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3060 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3061 * @qc: Command with scatter-gather table to be mapped.
3063 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3066 * spin_lock_irqsave(host_set lock)
3069 * Zero on success, negative on error.
/* Multi-element analogue of ata_sg_setup_one(): the LAST sg element is
 * trimmed to a 32-bit boundary and the remainder routed through the
 * per-tag pad buffer; if trimming empties it, the element is dropped
 * from the map (trim_sg).  On map failure the last element's length is
 * restored so the sg table is left untouched. */
3073 static int ata_sg_setup(struct ata_queued_cmd *qc)
3075 struct ata_port *ap = qc->ap;
3076 struct scatterlist *sg = qc->__sg;
3077 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3078 int n_elem, pre_n_elem, dir, trim_sg = 0;
3080 VPRINTK("ENTER, ata%u\n", ap->id);
3081 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3083 /* we must lengthen transfers to end on a 32-bit boundary */
3084 qc->pad_len = lsg->length & 3;
3086 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3087 struct scatterlist *psg = &qc->pad_sgent;
3088 unsigned int offset;
3090 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3092 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3095 * psg->page/offset are used to copy to-be-written
3096 * data in this function or read data in ata_sg_clean.
3098 offset = lsg->offset + lsg->length - qc->pad_len;
/* offset may run past lsg->page; nth_page resolves the real page */
3099 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3100 psg->offset = offset_in_page(offset);
3102 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3103 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3104 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3105 kunmap_atomic(addr, KM_IRQ0);
3108 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3109 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3111 lsg->length -= qc->pad_len;
3112 if (lsg->length == 0)
3115 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3116 qc->n_elem - 1, lsg->length, qc->pad_len);
3119 pre_n_elem = qc->n_elem;
3120 if (trim_sg && pre_n_elem)
3129 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3131 /* restore last sg */
3132 lsg->length += qc->pad_len;
3136 DPRINTK("%d sg elements mapped\n", n_elem);
3139 qc->n_elem = n_elem;
3145 * ata_poll_qc_complete - turn irq back on and finish qc
3146 * @qc: Command to complete
3147 * @err_mask: ATA status register content
3150 * None. (grabs host lock)
/* Completion path for polled (IRQ-masked) commands: under the host_set
 * lock, clears ATA_FLAG_NOINTR so the irq handler resumes servicing the
 * port, then completes the qc. */
3153 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3155 struct ata_port *ap = qc->ap;
3156 unsigned long flags;
3158 spin_lock_irqsave(&ap->host_set->lock, flags);
3159 ap->flags &= ~ATA_FLAG_NOINTR;
3161 ata_qc_complete(qc);
3162 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3166 * ata_pio_poll - poll using PIO, depending on current state
3167 * @ap: the target ata_port
3170 * None. (executing in kernel thread context)
3173 * timeout value to use
/* HSM polling step: while the device reports BSY, stay in the matching
 * *_POLL state and ask to be re-queued after a short pause; on timeout
 * flag AC_ERR_TIMEOUT and move to HSM_ST_TMOUT.  Once BSY clears,
 * transition back to the non-polling state (HSM_ST / HSM_ST_LAST). */
3176 static unsigned long ata_pio_poll(struct ata_port *ap)
3178 struct ata_queued_cmd *qc;
3180 unsigned int poll_state = HSM_ST_UNKNOWN;
3181 unsigned int reg_state = HSM_ST_UNKNOWN;
3183 qc = ata_qc_from_tag(ap, ap->active_tag);
3184 WARN_ON(qc == NULL);
3186 switch (ap->hsm_task_state) {
3189 poll_state = HSM_ST_POLL;
3193 case HSM_ST_LAST_POLL:
3194 poll_state = HSM_ST_LAST_POLL;
3195 reg_state = HSM_ST_LAST;
3202 status = ata_chk_status(ap);
3203 if (status & ATA_BUSY) {
3204 if (time_after(jiffies, ap->pio_task_timeout)) {
3205 qc->err_mask |= AC_ERR_TIMEOUT;
3206 ap->hsm_task_state = HSM_ST_TMOUT;
3209 ap->hsm_task_state = poll_state;
3210 return ATA_SHORT_PAUSE;
3213 ap->hsm_task_state = reg_state;
3218 * ata_pio_complete - check if drive is busy or idle
3219 * @ap: the target ata_port
3222 * None. (executing in kernel thread context)
3225 * Non-zero if qc completed, zero otherwise.
/* Final HSM step of a PIO command: short busy-waits first (fast path);
 * if the drive stays busy, fall back to HSM_ST_LAST_POLL with a timeout.
 * When idle, any error status is folded into qc->err_mask and the qc is
 * completed via ata_poll_qc_complete(). */
3228 static int ata_pio_complete (struct ata_port *ap)
3230 struct ata_queued_cmd *qc;
3234 * This is purely heuristic. This is a fast path. Sometimes when
3235 * we enter, BSY will be cleared in a chk-status or two. If not,
3236 * the drive is probably seeking or something. Snooze for a couple
3237 * msecs, then chk-status again. If still busy, fall back to
3238 * HSM_ST_POLL state.
3240 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3241 if (drv_stat & ATA_BUSY) {
3243 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3244 if (drv_stat & ATA_BUSY) {
3245 ap->hsm_task_state = HSM_ST_LAST_POLL;
3246 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3251 qc = ata_qc_from_tag(ap, ap->active_tag);
3252 WARN_ON(qc == NULL);
3254 drv_stat = ata_wait_idle(ap);
3255 if (!ata_ok(drv_stat)) {
3256 qc->err_mask |= __ac_err_mask(drv_stat);
3257 ap->hsm_task_state = HSM_ST_ERR;
3261 ap->hsm_task_state = HSM_ST_IDLE;
3263 WARN_ON(qc->err_mask);
3264 ata_poll_qc_complete(qc);
3266 /* another command may start at this point */
3273 * swap_buf_le16 - swap halves of 16-bit words in place
3274 * @buf: Buffer to swap
3275 * @buf_words: Number of 16-bit words in buffer.
3277 * Swap halves of 16-bit words if needed to convert from
3278 * little-endian byte order to native cpu byte order, or
3282 * Inherited from caller.
/* No-op on little-endian CPUs: the loop body is compiled only inside
 * the __BIG_ENDIAN conditional (its opening #ifdef is outside this
 * truncated view). */
3284 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3289 for (i = 0; i < buf_words; i++)
3290 buf[i] = le16_to_cpu(buf[i]);
3291 #endif /* __BIG_ENDIAN */
3295 * ata_mmio_data_xfer - Transfer data by MMIO
3296 * @ap: port to read/write
3298 * @buflen: buffer length
3299 * @write_data: read/write
3301 * Transfer data from/to the device data register by MMIO.
3304 * Inherited from caller.
/* Moves buflen bytes through the memory-mapped data register in 16-bit
 * units; an odd trailing byte is bounced through a 2-byte aligned
 * temporary so the device always sees full words. */
3307 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3308 unsigned int buflen, int write_data)
3311 unsigned int words = buflen >> 1;
3312 u16 *buf16 = (u16 *) buf;
3313 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3315 /* Transfer multiple of 2 bytes */
3317 for (i = 0; i < words; i++)
3318 writew(le16_to_cpu(buf16[i]), mmio);
3320 for (i = 0; i < words; i++)
3321 buf16[i] = cpu_to_le16(readw(mmio));
3324 /* Transfer trailing 1 byte, if any. */
3325 if (unlikely(buflen & 0x01)) {
3326 u16 align_buf[1] = { 0 };
3327 unsigned char *trailing_buf = buf + buflen - 1;
3330 memcpy(align_buf, trailing_buf, 1);
3331 writew(le16_to_cpu(align_buf[0]), mmio);
3333 align_buf[0] = cpu_to_le16(readw(mmio));
3334 memcpy(trailing_buf, align_buf, 1);
3340 * ata_pio_data_xfer - Transfer data by PIO
3341 * @ap: port to read/write
3343 * @buflen: buffer length
3344 * @write_data: read/write
3346 * Transfer data from/to the device data register by PIO.
3349 * Inherited from caller.
/* Port-I/O twin of ata_mmio_data_xfer(): bulk words go via insw/outsw,
 * and an odd trailing byte is bounced through an aligned temporary. */
3352 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3353 unsigned int buflen, int write_data)
3355 unsigned int words = buflen >> 1;
3357 /* Transfer multiple of 2 bytes */
3359 outsw(ap->ioaddr.data_addr, buf, words);
3361 insw(ap->ioaddr.data_addr, buf, words);
3363 /* Transfer trailing 1 byte, if any. */
3364 if (unlikely(buflen & 0x01)) {
3365 u16 align_buf[1] = { 0 };
3366 unsigned char *trailing_buf = buf + buflen - 1;
3369 memcpy(align_buf, trailing_buf, 1);
3370 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3372 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3373 memcpy(trailing_buf, align_buf, 1);
3379 * ata_data_xfer - Transfer data from/to the data register.
3380 * @ap: port to read/write
3382 * @buflen: buffer length
3383 * @do_write: read/write
3385 * Transfer data from/to the device data register.
3388 * Inherited from caller.
/* Dispatches to the MMIO or port-I/O transfer routine based on
 * ATA_FLAG_MMIO; controllers flagged ATA_FLAG_IRQ_MASK additionally get
 * the whole transfer wrapped in local_irq_save/restore. */
3391 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3392 unsigned int buflen, int do_write)
3394 /* Make the crap hardware pay the costs not the good stuff */
3395 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3396 unsigned long flags;
3397 local_irq_save(flags);
3398 if (ap->flags & ATA_FLAG_MMIO)
3399 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3401 ata_pio_data_xfer(ap, buf, buflen, do_write);
3402 local_irq_restore(flags);
3404 if (ap->flags & ATA_FLAG_MMIO)
3405 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3407 ata_pio_data_xfer(ap, buf, buflen, do_write);
3412 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3413 * @qc: Command on going
3415 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3418 * Inherited from caller.
/* Walks the qc's scatterlist one 512-byte sector at a time, kmapping the
 * page that holds the current sector; sets HSM_ST_LAST when the final
 * sector of the command is about to be transferred. */
3421 static void ata_pio_sector(struct ata_queued_cmd *qc)
3423 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3424 struct scatterlist *sg = qc->__sg;
3425 struct ata_port *ap = qc->ap;
3427 unsigned int offset;
3430 if (qc->cursect == (qc->nsect - 1))
3431 ap->hsm_task_state = HSM_ST_LAST;
3433 page = sg[qc->cursg].page;
3434 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3436 /* get the current page and offset */
3437 page = nth_page(page, (offset >> PAGE_SHIFT));
3438 offset %= PAGE_SIZE;
3440 buf = kmap(page) + offset;
/* advance to the next sg element once this one is exhausted */
3445 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3450 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3452 /* do the actual data transfer */
3453 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3454 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3460 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3461 * @qc: Command on going
3462 * @bytes: number of bytes
3464 * Transfer Transfer data from/to the ATAPI device.
3467 * Inherited from caller.
/* Transfers the byte count the device requested, clamped to the current
 * sg element and to a page boundary.  If the device asks for more data
 * than the sg provides, the excess is drained (read) or zero-padded
 * (write) through a 2-byte scratch word so the protocol stays in sync. */
3471 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3473 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3474 struct scatterlist *sg = qc->__sg;
3475 struct ata_port *ap = qc->ap;
3478 unsigned int offset, count;
3480 if (qc->curbytes + bytes >= qc->nbytes)
3481 ap->hsm_task_state = HSM_ST_LAST;
3484 if (unlikely(qc->cursg >= qc->n_elem)) {
3486 * The end of qc->sg is reached and the device expects
3487 * more data to transfer. In order not to overrun qc->sg
3488 * and fulfill length specified in the byte count register,
3489 * - for read case, discard trailing data from the device
3490 * - for write case, padding zero data to the device
3492 u16 pad_buf[1] = { 0 };
3493 unsigned int words = bytes >> 1;
3496 if (words) /* warning if bytes > 1 */
3497 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3500 for (i = 0; i < words; i++)
3501 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3503 ap->hsm_task_state = HSM_ST_LAST;
3507 sg = &qc->__sg[qc->cursg];
3510 offset = sg->offset + qc->cursg_ofs;
3512 /* get the current page and offset */
3513 page = nth_page(page, (offset >> PAGE_SHIFT));
3514 offset %= PAGE_SIZE;
3516 /* don't overrun current sg */
3517 count = min(sg->length - qc->cursg_ofs, bytes);
3519 /* don't cross page boundaries */
3520 count = min(count, (unsigned int)PAGE_SIZE - offset);
3522 buf = kmap(page) + offset;
3525 qc->curbytes += count;
3526 qc->cursg_ofs += count;
3528 if (qc->cursg_ofs == sg->length) {
3533 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3535 /* do the actual data transfer */
3536 ata_data_xfer(ap, buf, count, do_write);
3545 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3546 * @qc: Command on going
3548 * Transfer Transfer data from/to the ATAPI device.
3551 * Inherited from caller.
/* Reads the interrupt reason and byte count registers from the device,
 * validates that the device's phase (CoD/IO bits in ireason) matches the
 * expected transfer direction, then hands the byte count to
 * __atapi_pio_bytes().  Phase mismatch is reported as AC_ERR_HSM. */
3554 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3556 struct ata_port *ap = qc->ap;
3557 struct ata_device *dev = qc->dev;
3558 unsigned int ireason, bc_lo, bc_hi, bytes;
3559 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3561 ap->ops->tf_read(ap, &qc->tf);
3562 ireason = qc->tf.nsect;
3563 bc_lo = qc->tf.lbam;
3564 bc_hi = qc->tf.lbah;
3565 bytes = (bc_hi << 8) | bc_lo;
3567 /* shall be cleared to zero, indicating xfer of data */
3568 if (ireason & (1 << 0))
3571 /* make sure transfer direction matches expected */
3572 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3573 if (do_write != i_write)
3576 __atapi_pio_bytes(qc, bytes);
/* error path target of the checks above (label elided in this view) */
3581 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3582 ap->id, dev->devno);
3583 qc->err_mask |= AC_ERR_HSM;
3584 ap->hsm_task_state = HSM_ST_ERR;
3588 * ata_pio_block - start PIO on a block
3589 * @ap: the target ata_port
3592 * None. (executing in kernel thread context)
/* One HSM_ST step: wait briefly for BSY to clear (falling back to
 * HSM_ST_POLL on a slow device), check for device errors, then move one
 * chunk of data — atapi_pio_bytes() for PACKET commands (DRQ=0 meaning
 * done), ata_pio_sector() for plain ATA PIO (DRQ=0 is a protocol error). */
3595 static void ata_pio_block(struct ata_port *ap)
3597 struct ata_queued_cmd *qc;
3601 * This is purely heuristic. This is a fast path.
3602 * Sometimes when we enter, BSY will be cleared in
3603 * a chk-status or two. If not, the drive is probably seeking
3604 * or something. Snooze for a couple msecs, then
3605 * chk-status again. If still busy, fall back to
3606 * HSM_ST_POLL state.
3608 status = ata_busy_wait(ap, ATA_BUSY, 5);
3609 if (status & ATA_BUSY) {
3611 status = ata_busy_wait(ap, ATA_BUSY, 10);
3612 if (status & ATA_BUSY) {
3613 ap->hsm_task_state = HSM_ST_POLL;
3614 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3619 qc = ata_qc_from_tag(ap, ap->active_tag);
3620 WARN_ON(qc == NULL);
3623 if (status & (ATA_ERR | ATA_DF)) {
3624 qc->err_mask |= AC_ERR_DEV;
3625 ap->hsm_task_state = HSM_ST_ERR;
3629 /* transfer data if any */
3630 if (is_atapi_taskfile(&qc->tf)) {
3631 /* DRQ=0 means no more data to transfer */
3632 if ((status & ATA_DRQ) == 0) {
3633 ap->hsm_task_state = HSM_ST_LAST;
3637 atapi_pio_bytes(qc);
3639 /* handle BSY=0, DRQ=0 as error */
3640 if ((status & ATA_DRQ) == 0) {
3641 qc->err_mask |= AC_ERR_HSM;
3642 ap->hsm_task_state = HSM_ST_ERR;
/* ata_pio_error - HSM_ST_ERR/HSM_ST_TMOUT terminal state handler.
 * Logs the failure (quietly for ATAPI PACKET, which uses request-sense
 * for diagnosis), resets the HSM to idle, and completes the failed qc.
 * qc->err_mask must already describe the error. */
3650 static void ata_pio_error(struct ata_port *ap)
3652 struct ata_queued_cmd *qc;
3654 qc = ata_qc_from_tag(ap, ap->active_tag);
3655 WARN_ON(qc == NULL);
3657 if (qc->tf.command != ATA_CMD_PACKET)
3658 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3660 /* make sure qc->err_mask is available to
3661 * know what's wrong and recover
3663 WARN_ON(qc->err_mask == 0);
3665 ap->hsm_task_state = HSM_ST_IDLE;
3667 ata_poll_qc_complete(qc);
/* ata_pio_task - workqueue entry point driving the PIO host state machine.
 * Dispatches on ap->hsm_task_state and re-queues itself (possibly after a
 * delay returned by ata_pio_poll()) until the qc completes or errors.
 * NOTE(review): several dispatch arms are elided in this truncated view. */
3670 static void ata_pio_task(void *_data)
3672 struct ata_port *ap = _data;
3673 unsigned long timeout;
3680 switch (ap->hsm_task_state) {
3689 qc_completed = ata_pio_complete(ap);
3693 case HSM_ST_LAST_POLL:
3694 timeout = ata_pio_poll(ap);
3704 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3705 else if (!qc_completed)
3710 * atapi_packet_task - Write CDB bytes to hardware
3711 * @_data: Port to which ATAPI device is attached.
3713 * When device has indicated its readiness to accept
3714 * a CDB, this function is called. Send the CDB.
3715 * If DMA is to be performed, exit immediately.
3716 * Otherwise, we are in polling mode, so poll
3717 * status under operation succeeds or fails.
3720 * Kernel thread context (may sleep)
/* Waits for BSY to clear and DRQ to assert, then sends the CDB.  For
 * DMA/NODATA protocols the NOINTR flag is dropped under the host lock so
 * the irq handler takes over exactly when the CDB/bmdma kick lands; for
 * PIO the HSM is armed and ata_pio_task scheduled instead.  Timeouts and
 * bad DRQ state mark the qc failed and complete it via the poll path. */
3723 static void atapi_packet_task(void *_data)
3725 struct ata_port *ap = _data;
3726 struct ata_queued_cmd *qc;
3729 qc = ata_qc_from_tag(ap, ap->active_tag);
3730 WARN_ON(qc == NULL);
3731 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3733 /* sleep-wait for BSY to clear */
3734 DPRINTK("busy wait\n");
3735 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3736 qc->err_mask |= AC_ERR_TIMEOUT;
3740 /* make sure DRQ is set */
3741 status = ata_chk_status(ap);
3742 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3743 qc->err_mask |= AC_ERR_HSM;
3748 DPRINTK("send cdb\n");
3749 WARN_ON(qc->dev->cdb_len < 12);
3751 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3752 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3753 unsigned long flags;
3755 /* Once we're done issuing command and kicking bmdma,
3756 * irq handler takes over. To not lose irq, we need
3757 * to clear NOINTR flag before sending cdb, but
3758 * interrupt handler shouldn't be invoked before we're
3759 * finished. Hence, the following locking.
3761 spin_lock_irqsave(&ap->host_set->lock, flags);
3762 ap->flags &= ~ATA_FLAG_NOINTR;
3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3764 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3765 ap->ops->bmdma_start(qc); /* initiate bmdma */
3766 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3768 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3770 /* PIO commands are handled by polling */
3771 ap->hsm_task_state = HSM_ST;
3772 ata_port_queue_task(ap, ata_pio_task, ap, 0);
/* error path: complete the qc with err_mask already set */
3778 ata_poll_qc_complete(qc);
3782 * ata_qc_timeout - Handle timeout of queued command
3783 * @qc: Command that timed out
3785 * Some part of the kernel (currently, only the SCSI layer)
3786 * has noticed that the active command on port @ap has not
3787 * completed after a specified length of time. Handle this
3788 * condition by disabling DMA (if necessary) and completing
3789 * transactions, with error if necessary.
3791 * This also handles the case of the "lost interrupt", where
3792 * for some reason (possibly hardware bug, possibly driver bug)
3793 * an interrupt was not delivered to the driver, even though the
3794 * transaction completed successfully.
3797 * Inherited from SCSI layer (none, can sleep)
3800 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3802 struct ata_port *ap = qc->ap;
3803 struct ata_host_set *host_set = ap->host_set;
3804 u8 host_stat = 0, drv_stat;
3805 unsigned long flags;
/* force the PIO state machine idle before touching hardware */
3809 ap->hsm_task_state = HSM_ST_IDLE;
3811 spin_lock_irqsave(&host_set->lock, flags);
3813 switch (qc->tf.protocol) {
3816 case ATA_PROT_ATAPI_DMA:
3817 host_stat = ap->ops->bmdma_status(ap);
3819 /* before we do anything else, clear DMA-Start bit */
3820 ap->ops->bmdma_stop(qc);
3826 drv_stat = ata_chk_status(ap);
3828 /* ack bmdma irq events */
3829 ap->ops->irq_clear(ap);
3831 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3832 ap->id, qc->tf.command, drv_stat, host_stat);
3834 /* complete taskfile transaction */
3835 qc->err_mask |= ac_err_mask(drv_stat);
3839 spin_unlock_irqrestore(&host_set->lock, flags);
/* hand the failed qc to the EH completion path (may sleep) */
3841 ata_eh_qc_complete(qc);
3847 * ata_eng_timeout - Handle timeout of queued command
3848 * @ap: Port on which timed-out command is active
3850 * Some part of the kernel (currently, only the SCSI layer)
3851 * has noticed that the active command on port @ap has not
3852 * completed after a specified length of time. Handle this
3853 * condition by disabling DMA (if necessary) and completing
3854 * transactions, with error if necessary.
3856 * This also handles the case of the "lost interrupt", where
3857 * for some reason (possibly hardware bug, possibly driver bug)
3858 * an interrupt was not delivered to the driver, even though the
3859 * transaction completed successfully.
3862 * Inherited from SCSI layer (none, can sleep)
/* Thin wrapper: looks up the active qc on @ap and delegates to
 * ata_qc_timeout(). */
3865 void ata_eng_timeout(struct ata_port *ap)
3869 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3875 * ata_qc_new - Request an available ATA command, for queueing
3876 * @ap: Port associated with device @dev
3877 * @dev: Device from whom we request an available command structure
/* Scans the port's tag bitmap; test_and_set_bit makes the claim atomic,
 * so the first free tag in 0..ATA_MAX_QUEUE-1 is won exactly once.
 * Returns NULL when every tag is busy. */
3883 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3885 struct ata_queued_cmd *qc = NULL;
3888 for (i = 0; i < ATA_MAX_QUEUE; i++)
3889 if (!test_and_set_bit(i, &ap->qactive)) {
3890 qc = ata_qc_from_tag(ap, i);
3901 * ata_qc_new_init - Request an available ATA command, and initialize it
3902 * @ap: Port associated with device @dev
3903 * @dev: Device from whom we request an available command structure
/* Allocates a qc via ata_qc_new() and performs per-command field
 * initialisation (details elided in this truncated view). */
3909 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3910 struct ata_device *dev)
3912 struct ata_queued_cmd *qc;
3914 qc = ata_qc_new(ap);
3927 * ata_qc_free - free unused ata_queued_cmd
3928 * @qc: Command to complete
3930 * Designed to free unused ata_queued_cmd object
3931 * in case something prevents using it.
3934 * spin_lock_irqsave(host_set lock)
/* Releases the qc's tag back to the port bitmap and poisons both the
 * qc's tag and (if it was the active one) ap->active_tag so stale use
 * is detectable. */
3936 void ata_qc_free(struct ata_queued_cmd *qc)
3938 struct ata_port *ap = qc->ap;
3941 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3945 if (likely(ata_tag_valid(tag))) {
3946 if (tag == ap->active_tag)
3947 ap->active_tag = ATA_TAG_POISON;
3948 qc->tag = ATA_TAG_POISON;
3949 clear_bit(tag, &ap->qactive);
/* __ata_qc_complete - low-level qc completion.
 * Unmaps DMA (if mapped), clears ATA_QCFLAG_ACTIVE so the irq handler
 * cannot complete the command a second time, then invokes the qc's
 * completion callback. */
3953 void __ata_qc_complete(struct ata_queued_cmd *qc)
3955 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3956 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3958 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3961 /* atapi: mark qc as inactive to prevent the interrupt handler
3962 * from completing the command twice later, before the error handler
3963 * is called. (when rc != 0 and atapi request sense is needed)
3965 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3967 /* call completion callback */
3968 qc->complete_fn(qc);
/* ata_should_dma_map - decide whether this qc's protocol requires DMA
 * mapping.  DMA protocols always map; ATAPI PIO maps only when the port
 * advertises ATA_FLAG_PIO_DMA. */
3971 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3973 struct ata_port *ap = qc->ap;
3975 switch (qc->tf.protocol) {
3977 case ATA_PROT_ATAPI_DMA:
3980 case ATA_PROT_ATAPI:
3982 if (ap->flags & ATA_FLAG_PIO_DMA)
3995 * ata_qc_issue - issue taskfile to device
3996 * @qc: command to issue to device
3998 * Prepare an ATA command to submission to device.
3999 * This includes mapping the data into a DMA-able
4000 * area, filling in the S/G table, and finally
4001 * writing the taskfile to hardware, starting the command.
4004 * spin_lock_irqsave(host_set lock)
4007 * Zero on success, AC_ERR_* mask on failure
/* DMA-maps the data (sg table or single buffer) when the protocol calls
 * for it, runs the LLD's qc_prep hook, marks the qc active, and hands it
 * to the LLD's qc_issue hook.  Mapping failure returns AC_ERR_SYSTEM
 * with the DMAMAP flag cleared. */
4010 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4012 struct ata_port *ap = qc->ap;
4014 if (ata_should_dma_map(qc)) {
4015 if (qc->flags & ATA_QCFLAG_SG) {
4016 if (ata_sg_setup(qc))
4018 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4019 if (ata_sg_setup_one(qc))
4023 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4026 ap->ops->qc_prep(qc);
4028 qc->ap->active_tag = qc->tag;
4029 qc->flags |= ATA_QCFLAG_ACTIVE;
4031 return ap->ops->qc_issue(qc);
4034 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4035 return AC_ERR_SYSTEM;
4040 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4041 * @qc: command to issue to device
4043 * Using various libata functions and hooks, this function
4044 * starts an ATA command. ATA commands are grouped into
4045 * classes called "protocols", and issuing each type of protocol
4046 * is slightly different.
4048 * May be used as the qc_issue() entry in ata_port_operations.
4051 * spin_lock_irqsave(host_set lock)
4054 * Zero on success, AC_ERR_* mask on failure
/* Per-protocol issue: NODATA writes the taskfile and waits for the irq;
 * DMA loads tf and kicks bmdma; PIO and ATAPI variants arm the polling
 * HSM / packet task.  ATAPI NODATA/DMA set ATA_FLAG_NOINTR so no irq is
 * taken until atapi_packet_task has sent the CDB.  Unknown protocols
 * return AC_ERR_SYSTEM. */
4057 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4059 struct ata_port *ap = qc->ap;
4061 ata_dev_select(ap, qc->dev->devno, 1, 0);
4063 switch (qc->tf.protocol) {
4064 case ATA_PROT_NODATA:
4065 ata_tf_to_host(ap, &qc->tf);
4069 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4070 ap->ops->bmdma_setup(qc); /* set up bmdma */
4071 ap->ops->bmdma_start(qc); /* initiate bmdma */
4074 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4075 ata_qc_set_polling(qc);
4076 ata_tf_to_host(ap, &qc->tf);
4077 ap->hsm_task_state = HSM_ST;
4078 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4081 case ATA_PROT_ATAPI:
4082 ata_qc_set_polling(qc);
4083 ata_tf_to_host(ap, &qc->tf);
4084 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4087 case ATA_PROT_ATAPI_NODATA:
4088 ap->flags |= ATA_FLAG_NOINTR;
4089 ata_tf_to_host(ap, &qc->tf);
4090 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4093 case ATA_PROT_ATAPI_DMA:
4094 ap->flags |= ATA_FLAG_NOINTR;
4095 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4096 ap->ops->bmdma_setup(qc); /* set up bmdma */
4097 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4102 return AC_ERR_SYSTEM;
4109 * ata_host_intr - Handle host interrupt for given (port, task)
4110 * @ap: Port on which interrupt arrived (possibly...)
4111 * @qc: Taskfile currently active in engine
4113 * Handle host interrupt for given queued command. Currently,
4114 * only DMA interrupts are handled. All other commands are
4115 * handled via polling with interrupts disabled (nIEN bit).
4118 * spin_lock_irqsave(host_set lock)
4121 * One if interrupt was handled, zero if not (shared irq).
4124 inline unsigned int ata_host_intr (struct ata_port *ap,
4125 struct ata_queued_cmd *qc)
4127 u8 status, host_stat;
4129 switch (qc->tf.protocol) {
4132 case ATA_PROT_ATAPI_DMA:
4133 case ATA_PROT_ATAPI:
4134 /* check status of DMA engine */
4135 host_stat = ap->ops->bmdma_status(ap);
4136 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4138 /* if it's not our irq... */
4139 if (!(host_stat & ATA_DMA_INTR))
4142 /* before we do anything else, clear DMA-Start bit */
4143 ap->ops->bmdma_stop(qc);
/* intentional fallthrough into the NODATA status handling */
4147 case ATA_PROT_ATAPI_NODATA:
4148 case ATA_PROT_NODATA:
4149 /* check altstatus */
4150 status = ata_altstatus(ap);
4151 if (status & ATA_BUSY)
4154 /* check main status, clearing INTRQ */
4155 status = ata_chk_status(ap);
4156 if (unlikely(status & ATA_BUSY))
4158 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4159 ap->id, qc->tf.protocol, status);
4161 /* ack bmdma irq events */
4162 ap->ops->irq_clear(ap);
4164 /* complete taskfile transaction */
4165 qc->err_mask |= ac_err_mask(status);
4166 ata_qc_complete(qc);
4173 return 1; /* irq handled */
/* spurious-irq accounting: periodically ack and warn (debug aid) */
4176 ap->stats.idle_irq++;
4179 if ((ap->stats.idle_irq % 1000) == 0) {
4180 ata_irq_ack(ap, 0); /* debug trap */
4181 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4185 return 0; /* irq not handled */
4189 * ata_interrupt - Default ATA host interrupt handler
4190 * @irq: irq line (unused)
4191 * @dev_instance: pointer to our ata_host_set information structure
4194 * Default interrupt handler for PCI IDE devices. Calls
4195 * ata_host_intr() for each port that is not disabled.
4198 * Obtains host_set lock during operation.
4201 * IRQ_NONE or IRQ_HANDLED.
/* Shared-irq dispatcher: under the host_set lock, walks every port and,
 * for ports that are enabled and not in NOINTR (polling) mode, forwards
 * the irq to ata_host_intr() for the active, non-nIEN qc.  Returns
 * handled/not-handled so the kernel can pass shared irqs along. */
4204 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4206 struct ata_host_set *host_set = dev_instance;
4208 unsigned int handled = 0;
4209 unsigned long flags;
4211 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4212 spin_lock_irqsave(&host_set->lock, flags);
4214 for (i = 0; i < host_set->n_ports; i++) {
4215 struct ata_port *ap;
4217 ap = host_set->ports[i];
4219 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4220 struct ata_queued_cmd *qc;
4222 qc = ata_qc_from_tag(ap, ap->active_tag);
4223 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4224 (qc->flags & ATA_QCFLAG_ACTIVE))
4225 handled |= ata_host_intr(ap, qc);
4229 spin_unlock_irqrestore(&host_set->lock, flags);
4231 return IRQ_RETVAL(handled);
4236 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4237 * without filling any other registers
/* Synchronous helper for single-opcode NODATA commands (flush, standby,
 * idle-immediate); failures are logged and the error propagated. */
4239 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4242 struct ata_taskfile tf;
4245 ata_tf_init(ap, &tf, dev->devno);
4248 tf.flags |= ATA_TFLAG_DEVICE;
4249 tf.protocol = ATA_PROT_NODATA;
4251 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4253 printk(KERN_ERR "%s: ata command failed: %d\n",
/* ata_flush_cache - flush the drive's write cache if it supports flushing.
 * Prefers FLUSH CACHE EXT when IDENTIFY advertises it (needed past the
 * 28-bit LBA limit), otherwise uses plain FLUSH CACHE. */
4259 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4263 if (!ata_try_flush_cache(dev))
4266 if (ata_id_has_flush_ext(dev->id))
4267 cmd = ATA_CMD_FLUSH_EXT;
4269 cmd = ATA_CMD_FLUSH;
4271 return ata_do_simple_cmd(ap, dev, cmd);
4274 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4276 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4279 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4281 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4285 * ata_device_resume - wakeup a previously suspended devices
4286 * @ap: port the device is connected to
4287 * @dev: the device to resume
4289 * Kick the drive back into action, by sending it an idle immediate
4290 * command and making sure its transfer mode matches between drive
4294 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4296 if (ap->flags & ATA_FLAG_SUSPENDED) {
4297 ap->flags &= ~ATA_FLAG_SUSPENDED;
4300 if (!ata_dev_present(dev))
4302 if (dev->class == ATA_DEV_ATA)
4303 ata_start_drive(ap, dev);
4309 * ata_device_suspend - prepare a device for suspend
4310 * @ap: port the device is connected to
4311 * @dev: the device to suspend
4313 * Flush the cache on the drive, if appropriate, then issue a
4314 * standbynow command.
4316 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4318 if (!ata_dev_present(dev))
4320 if (dev->class == ATA_DEV_ATA)
4321 ata_flush_cache(ap, dev);
4323 ata_standby_drive(ap, dev);
4324 ap->flags |= ATA_FLAG_SUSPENDED;
4329 * ata_port_start - Set port up for dma.
4330 * @ap: Port to initialize
4332 * Called just after data structures for each port are
4333 * initialized. Allocates space for PRD table.
4335 * May be used as the port_start() entry in ata_port_operations.
4338 * Inherited from caller.
4341 int ata_port_start (struct ata_port *ap)
4343 struct device *dev = ap->host_set->dev;
4346 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4350 rc = ata_pad_alloc(ap, dev);
4352 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4356 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4363 * ata_port_stop - Undo ata_port_start()
4364 * @ap: Port to shut down
4366 * Frees the PRD table.
4368 * May be used as the port_stop() entry in ata_port_operations.
4371 * Inherited from caller.
4374 void ata_port_stop (struct ata_port *ap)
4376 struct device *dev = ap->host_set->dev;
4378 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4379 ata_pad_free(ap, dev);
4382 void ata_host_stop (struct ata_host_set *host_set)
4384 if (host_set->mmio_base)
4385 iounmap(host_set->mmio_base);
4390 * ata_host_remove - Unregister SCSI host structure with upper layers
4391 * @ap: Port to unregister
4392 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4395 * Inherited from caller.
4398 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4400 struct Scsi_Host *sh = ap->host;
4405 scsi_remove_host(sh);
4407 ap->ops->port_stop(ap);
4411 * ata_host_init - Initialize an ata_port structure
4412 * @ap: Structure to initialize
4413 * @host: associated SCSI mid-layer structure
4414 * @host_set: Collection of hosts to which @ap belongs
4415 * @ent: Probe information provided by low-level driver
4416 * @port_no: Port number associated with this ata_port
4418 * Initialize a new ata_port structure, and its associated
4422 * Inherited from caller.
4425 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4426 struct ata_host_set *host_set,
4427 const struct ata_probe_ent *ent, unsigned int port_no)
4433 host->max_channel = 1;
4434 host->unique_id = ata_unique_id++;
4435 host->max_cmd_len = 12;
4437 ap->flags = ATA_FLAG_PORT_DISABLED;
4438 ap->id = host->unique_id;
4440 ap->ctl = ATA_DEVCTL_OBS;
4441 ap->host_set = host_set;
4442 ap->port_no = port_no;
4444 ent->legacy_mode ? ent->hard_port_no : port_no;
4445 ap->pio_mask = ent->pio_mask;
4446 ap->mwdma_mask = ent->mwdma_mask;
4447 ap->udma_mask = ent->udma_mask;
4448 ap->flags |= ent->host_flags;
4449 ap->ops = ent->port_ops;
4450 ap->cbl = ATA_CBL_NONE;
4451 ap->active_tag = ATA_TAG_POISON;
4452 ap->last_ctl = 0xFF;
4454 INIT_WORK(&ap->port_task, NULL, NULL);
4455 INIT_LIST_HEAD(&ap->eh_done_q);
4457 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4458 struct ata_device *dev = &ap->device[i];
4460 dev->pio_mask = UINT_MAX;
4461 dev->mwdma_mask = UINT_MAX;
4462 dev->udma_mask = UINT_MAX;
4466 ap->stats.unhandled_irq = 1;
4467 ap->stats.idle_irq = 1;
4470 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4474 * ata_host_add - Attach low-level ATA driver to system
4475 * @ent: Information provided by low-level driver
4476 * @host_set: Collections of ports to which we add
4477 * @port_no: Port number associated with this host
4479 * Attach low-level ATA driver to system.
4482 * PCI/etc. bus probe sem.
4485 * New ata_port on success, for NULL on error.
4488 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4489 struct ata_host_set *host_set,
4490 unsigned int port_no)
4492 struct Scsi_Host *host;
4493 struct ata_port *ap;
4497 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4501 host->transportt = &ata_scsi_transport_template;
4503 ap = (struct ata_port *) &host->hostdata[0];
4505 ata_host_init(ap, host, host_set, ent, port_no);
4507 rc = ap->ops->port_start(ap);
4514 scsi_host_put(host);
4519 * ata_device_add - Register hardware device with ATA and SCSI layers
4520 * @ent: Probe information describing hardware device to be registered
4522 * This function processes the information provided in the probe
4523 * information struct @ent, allocates the necessary ATA and SCSI
4524 * host information structures, initializes them, and registers
4525 * everything with requisite kernel subsystems.
4527 * This function requests irqs, probes the ATA bus, and probes
4531 * PCI/etc. bus probe sem.
4534 * Number of ports registered. Zero on error (no ports registered).
4537 int ata_device_add(const struct ata_probe_ent *ent)
4539 unsigned int count = 0, i;
4540 struct device *dev = ent->dev;
4541 struct ata_host_set *host_set;
4544 /* alloc a container for our list of ATA ports (buses) */
4545 host_set = kzalloc(sizeof(struct ata_host_set) +
4546 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4549 spin_lock_init(&host_set->lock);
4551 host_set->dev = dev;
4552 host_set->n_ports = ent->n_ports;
4553 host_set->irq = ent->irq;
4554 host_set->mmio_base = ent->mmio_base;
4555 host_set->private_data = ent->private_data;
4556 host_set->ops = ent->port_ops;
4558 /* register each port bound to this device */
4559 for (i = 0; i < ent->n_ports; i++) {
4560 struct ata_port *ap;
4561 unsigned long xfer_mode_mask;
4563 ap = ata_host_add(ent, host_set, i);
4567 host_set->ports[i] = ap;
4568 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
4569 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4570 (ap->pio_mask << ATA_SHIFT_PIO);
4572 /* print per-port info to dmesg */
4573 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4574 "bmdma 0x%lX irq %lu\n",
4576 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4577 ata_mode_string(xfer_mode_mask),
4578 ap->ioaddr.cmd_addr,
4579 ap->ioaddr.ctl_addr,
4580 ap->ioaddr.bmdma_addr,
4584 host_set->ops->irq_clear(ap);
4591 /* obtain irq, that is shared between channels */
4592 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4593 DRV_NAME, host_set))
4596 /* perform each probe synchronously */
4597 DPRINTK("probe begin\n");
4598 for (i = 0; i < count; i++) {
4599 struct ata_port *ap;
4602 ap = host_set->ports[i];
4604 DPRINTK("ata%u: bus probe begin\n", ap->id);
4605 rc = ata_bus_probe(ap);
4606 DPRINTK("ata%u: bus probe end\n", ap->id);
4609 /* FIXME: do something useful here?
4610 * Current libata behavior will
4611 * tear down everything when
4612 * the module is removed
4613 * or the h/w is unplugged.
4617 rc = scsi_add_host(ap->host, dev);
4619 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4621 /* FIXME: do something useful here */
4622 /* FIXME: handle unconditional calls to
4623 * scsi_scan_host and ata_host_remove, below,
4629 /* probes are done, now scan each port's disk(s) */
4630 DPRINTK("host probe begin\n");
4631 for (i = 0; i < count; i++) {
4632 struct ata_port *ap = host_set->ports[i];
4634 ata_scsi_scan_host(ap);
4637 dev_set_drvdata(dev, host_set);
4639 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4640 return ent->n_ports; /* success */
4643 for (i = 0; i < count; i++) {
4644 ata_host_remove(host_set->ports[i], 1);
4645 scsi_host_put(host_set->ports[i]->host);
4649 VPRINTK("EXIT, returning 0\n");
4654 * ata_host_set_remove - PCI layer callback for device removal
4655 * @host_set: ATA host set that was removed
4657 * Unregister all objects associated with this host set. Free those
4661 * Inherited from calling layer (may sleep).
4664 void ata_host_set_remove(struct ata_host_set *host_set)
4666 struct ata_port *ap;
4669 for (i = 0; i < host_set->n_ports; i++) {
4670 ap = host_set->ports[i];
4671 scsi_remove_host(ap->host);
4674 free_irq(host_set->irq, host_set);
4676 for (i = 0; i < host_set->n_ports; i++) {
4677 ap = host_set->ports[i];
4679 ata_scsi_release(ap->host);
4681 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4682 struct ata_ioports *ioaddr = &ap->ioaddr;
4684 if (ioaddr->cmd_addr == 0x1f0)
4685 release_region(0x1f0, 8);
4686 else if (ioaddr->cmd_addr == 0x170)
4687 release_region(0x170, 8);
4690 scsi_host_put(ap->host);
4693 if (host_set->ops->host_stop)
4694 host_set->ops->host_stop(host_set);
4700 * ata_scsi_release - SCSI layer callback hook for host unload
4701 * @host: libata host to be unloaded
4703 * Performs all duties necessary to shut down a libata port...
4704 * Kill port kthread, disable port, and release resources.
4707 * Inherited from SCSI layer.
4713 int ata_scsi_release(struct Scsi_Host *host)
4715 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4720 ap->ops->port_disable(ap);
4721 ata_host_remove(ap, 0);
4722 for (i = 0; i < ATA_MAX_DEVICES; i++)
4723 kfree(ap->device[i].id);
4730 * ata_std_ports - initialize ioaddr with standard port offsets.
4731 * @ioaddr: IO address structure to be initialized
4733 * Utility function which initializes data_addr, error_addr,
4734 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4735 * device_addr, status_addr, and command_addr to standard offsets
4736 * relative to cmd_addr.
4738 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4741 void ata_std_ports(struct ata_ioports *ioaddr)
4743 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4744 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4745 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4746 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4747 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4748 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4749 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4750 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4751 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4752 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4758 void ata_pci_host_stop (struct ata_host_set *host_set)
4760 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4762 pci_iounmap(pdev, host_set->mmio_base);
4766 * ata_pci_remove_one - PCI layer callback for device removal
4767 * @pdev: PCI device that was removed
4769 * PCI layer indicates to libata via this hook that
4770 * hot-unplug or module unload event has occurred.
4771 * Handle this by unregistering all objects associated
4772 * with this PCI device. Free those objects. Then finally
4773 * release PCI resources and disable device.
4776 * Inherited from PCI layer (may sleep).
4779 void ata_pci_remove_one (struct pci_dev *pdev)
4781 struct device *dev = pci_dev_to_dev(pdev);
4782 struct ata_host_set *host_set = dev_get_drvdata(dev);
4784 ata_host_set_remove(host_set);
4785 pci_release_regions(pdev);
4786 pci_disable_device(pdev);
4787 dev_set_drvdata(dev, NULL);
4790 /* move to PCI subsystem */
4791 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4793 unsigned long tmp = 0;
4795 switch (bits->width) {
4798 pci_read_config_byte(pdev, bits->reg, &tmp8);
4804 pci_read_config_word(pdev, bits->reg, &tmp16);
4810 pci_read_config_dword(pdev, bits->reg, &tmp32);
4821 return (tmp == bits->val) ? 1 : 0;
4824 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4826 pci_save_state(pdev);
4827 pci_disable_device(pdev);
4828 pci_set_power_state(pdev, PCI_D3hot);
4832 int ata_pci_device_resume(struct pci_dev *pdev)
4834 pci_set_power_state(pdev, PCI_D0);
4835 pci_restore_state(pdev);
4836 pci_enable_device(pdev);
4837 pci_set_master(pdev);
4840 #endif /* CONFIG_PCI */
4843 static int __init ata_init(void)
4845 ata_wq = create_workqueue("ata");
4849 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4853 static void __exit ata_exit(void)
4855 destroy_workqueue(ata_wq);
4858 module_init(ata_init);
4859 module_exit(ata_exit);
4861 static unsigned long ratelimit_time;
4862 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4864 int ata_ratelimit(void)
4867 unsigned long flags;
4869 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4871 if (time_after(jiffies, ratelimit_time)) {
4873 ratelimit_time = jiffies + (HZ/5);
4877 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);