/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev,
					u16 heads, u16 sectors);
static void ata_set_mode(struct ata_port *ap);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
					 struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;

	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
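/*
 * Illustrative sketch (not part of the original source): what the
 * conversion above produces for a hypothetical PIO READ SECTOR(S)
 * taskfile aimed at port multiplier port 0.
 *
 *	struct ata_taskfile tf;
 *	u8 fis[20];
 *
 *	memset(&tf, 0, sizeof(tf));
 *	tf.command = ATA_CMD_PIO_READ;		// 0x20
 *	tf.nsect = 1;
 *	ata_tf_to_fis(&tf, fis, 0);
 *	// fis[0] == 0x27 (H2D FIS type), fis[1] == 0x80 (Command bit, PMP 0),
 *	// fis[2] == 0x20 (command), fis[12] == 1 (sector count)
 */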
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
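/*
 * Worked example (editor's illustration, assuming the DMA command group
 * starts at index 16, as the eight-entries-per-group layout of
 * ata_rw_cmds[] implies): a DMA write with LBA48 and FUA requested
 * gives fua == 4, lba48 == 2, write == 1, so the lookup is
 * ata_rw_cmds[16 + 4 + 2 + 1], i.e. ATA_CMD_WRITE_FUA_EXT.
 */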
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
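/*
 * Round-trip sketch (editor's illustration): packing PIO modes 0-4,
 * MWDMA modes 0-2 and UDMA modes 0-5, then unpacking them again.
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x3f, as before packing
 */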
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
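/*
 * Example (editor's illustration): XFER_UDMA_5 sits five entries above
 * XFER_UDMA_0, so ata_xfer_mode2mask(XFER_UDMA_5) returns
 * 1 << (ATA_SHIFT_UDMA + 5); feeding that mask back through
 * ata_xfer_mask2mode() yields XFER_UDMA_5 again.
 */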
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
	if (ata_dev_present(dev)) {
		printk(KERN_WARNING "ata%u: dev %u disabled\n",
		       ap->id, dev->devno);
		dev->class++;
	}
}

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
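/*
 * Typical use (editor's sketch): pulling a printable model string out
 * of IDENTIFY data, as the revalidation code later in this file does.
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
 *	// model[] now holds a NUL-terminated, blank-trimmed string
 */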
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
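/*
 * Worked example (editor's illustration): an LBA48 drive stores its
 * 64-bit sector count in words 100-103, so a 160 GB disk reports about
 * 160e9 / 512 == 312500000 sectors; a plain LBA drive uses the 32-bit
 * count in words 60-61; a CHS-only drive falls back to
 * cylinders (word 1) * heads (word 3) * sectors per track (word 6).
 */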
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual operation.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);		/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
		msleep(150);
	if (wait)
		ata_wait_idle(ap);
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88], id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
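/*
 * Worked example (editor's illustration): a drive reporting MWDMA
 * modes 0-2 sets the low three bits of word 63, so mwdma_mask == 0x07;
 * one reporting UDMA modes 0-5 in word 88 (valid when word 53 bit 2 is
 * set) yields udma_mask == 0x3f.  Both are then packed together with
 * pio_mask into the single xfer_mask returned above.
 */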
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (low level driver's) responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */
static unsigned int
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	return err_mask;
}
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
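/*
 * Example (editor's illustration): word ATA_ID_EIDE_PIO holds the
 * fastest cycle time the drive manages without IORDY, in nanoseconds.
 * A drive reporting 383 ns cannot keep up with PIO2's 240 ns cycle
 * without flow control, hence the pio > 240 test above returns 1
 * (IORDY needed); a drive reporting 180 ns does not need it.
 */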
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}

static inline u8 ata_dev_knobble(const struct ata_port *ap,
				 struct ata_device *dev)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	DPRINTK("EXIT, err\n");
	return rc;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	unsigned int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
			return rc;
		}
	} else {
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_present(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			ata_dev_disable(ap, dev);
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	if (ap->ops->set_mode)
		ap->ops->set_mode(ap);
	else
		ata_set_mode(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -1;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;
	const char *speed;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		if (tmp & (1 << 0))
			speed = "1.5";
		else if (tmp & (1 << 1))
			speed = "3.0";
		else
			speed = "<unknown>";
		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
		       ap->id, speed, sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair - return other device on cable
 *	@ap: port
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_present(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */

	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)	(((v)-1)/(unit)+1)
#define EZ(v,unit)	((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
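/*
 * Worked example (editor's illustration, assuming callers pass the bus
 * clock period in picoseconds as T): for a 33 MHz clock, T == 30000.
 * PIO4's 25 ns setup time then quantizes to ENOUGH(25 * 1000, 30000)
 * == 1 clock, and its 70 ns act8b time to ENOUGH(70 * 1000, 30000)
 * == 3 clocks, while zero-valued fields stay zero courtesy of EZ().
 */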
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
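/*
 * Usage sketch (editor's illustration of how a low-level driver might
 * call this; the clock value is an assumption, not from this file):
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	// ~30000, 33 MHz clock period
 *
 *	if (ata_timing_compute(adev, XFER_MW_DMA_2, &t, T, T) == 0) {
 *		// t.active and t.recover now hold clock counts that a
 *		// controller's timing registers can be programmed with
 *	}
 */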
static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(ap, dev);
	if (err_mask) {
		printk(KERN_ERR
		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
		       ap->id, err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(ap, dev, 0);
	if (rc) {
		printk(KERN_ERR
		       "ata%u: failed to revalidate after set xfermode\n",
		       ap->id);
		return rc;
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

static int ata_host_set_pio(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
			return -1;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc, used_dma = 0;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_present(dev))
			continue;

		ata_dev_xfermask(ap, dev);

		/* TODO: let LLDD filter dev->*_mask here */

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		if (dev->dma_mode)
			used_dma = 1;
	}

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		if (ata_dev_set_mode(ap, dev))
			goto err_out;
	}

	/*
	 * Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */

	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
		ap->host_set->simplex_claimed = 1;

	/*
	 * Chip specific finalisation
	 */

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 */
unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return AC_ERR_OTHER;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
static int sata_phy_resume(struct ata_port *ap)
{
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* Wait for phy to become ready, if necessary. */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			return 0;
	} while (time_before(jiffies, timeout));

	return -1;
}

/**
 *	ata_std_probeinit - initialize probing
 *	@ap: port to be probed
 *
 *	@ap is about to be probed.  Initialize it.  This function is
 *	to be used as standard callback for ata_drive_probe_reset().
 *
 *	NOTE!!! Do not use this function as probeinit if a low level
 *	driver implements only hardreset.  Just pass NULL as probeinit
 *	in that case.  Using this function is probably okay but doing
 *	so makes reset sequence different from the original
 *	->phy_reset implementation and Jeff nervous.  :-P
 */
void ata_std_probeinit(struct ata_port *ap)
{
	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
		sata_phy_resume(ap);
		if (sata_dev_present(ap))
			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
	}
}
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.  This function is to be used
 *	as standard callback for ata_drive_*_reset() functions.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		if (verbose)
			printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
			       ap->id, err_mask);
		else
			DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *	This function is to be used as standard callback for
 *	ata_drive_*_reset().
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
{
	DPRINTK("ENTER\n");

	/* Issue phy wake/reset */
	scr_write_flush(ap, SCR_CONTROL, 0x301);

	/*
	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* Bring phy back */
	sata_phy_resume(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!sata_dev_present(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		if (verbose)
			printk(KERN_ERR "ata%u: COMRESET failed "
			       "(device not ready)\n", ap->id);
		else
			DPRINTK("EXIT, device not ready\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	This function is to be used as standard callback for
 *	ata_drive_*_reset().
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	DPRINTK("ENTER\n");

	/* set cable type if it isn't already set */
	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* print link status */
	if (ap->cbl == ATA_CBL_SATA)
		sata_print_link_status(ap);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		else
			outb(ap->ctl, ap->ioaddr.ctl_addr);
	}

	DPRINTK("EXIT\n");
}
/**
 *	ata_std_probe_reset - standard probe reset method
 *	@ap: port to perform probe-reset
 *	@classes: resulting classes of attached devices
 *
 *	The stock off-the-shelf ->probe_reset method.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
		hardreset = sata_std_hardreset;

	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, hardreset,
				     ata_std_postreset, classes);
}
static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
			  ata_postreset_fn_t postreset,
			  unsigned int *classes)
{
	int i, rc;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	rc = reset(ap, 0, classes);
	if (rc)
		return rc;

	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
	 * is complete and convert all ATA_DEV_UNKNOWN to
	 * ATA_DEV_NONE.
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] != ATA_DEV_UNKNOWN)
			break;

	if (i < ATA_MAX_DEVICES)
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			if (classes[i] == ATA_DEV_UNKNOWN)
				classes[i] = ATA_DEV_NONE;

	if (postreset)
		postreset(ap, classes);

	return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
}
/**
 *	ata_drive_probe_reset - Perform probe reset with given methods
 *	@ap: port to reset
 *	@probeinit: probeinit method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@classes: resulting classes of attached devices
 *
 *	Reset the specified port and classify attached devices using
 *	given methods.  This function prefers softreset but tries all
 *	possible reset sequences to reset and classify devices.  This
 *	function is intended to be used for constructing ->probe_reset
 *	callback by low level drivers.
 *
 *	Reset methods should follow the following rules.
 *
 *	- Return 0 on success, -errno on failure.
 *	- If classification is supported, fill classes[] with
 *	  recognized class codes.
 *	- If classification is not supported, leave classes[] alone.
 *	- If verbose is non-zero, print error message on failure;
 *	  otherwise, shut up.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -EINVAL if no reset method is available, -ENODEV
 *	if classification fails, and any error code from reset
 *	methods.
 */
int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset, unsigned int *classes)
{
	int rc = -EINVAL;

	if (probeinit)
		probeinit(ap);

	if (softreset) {
		rc = do_probe_reset(ap, softreset, postreset, classes);
		if (rc == 0)
			return 0;
	}

	if (!hardreset)
		return rc;

	rc = do_probe_reset(ap, hardreset, postreset, classes);
	if (rc == 0 || rc != -ENODEV)
		return rc;

	if (softreset)
		rc = do_probe_reset(ap, softreset, postreset, classes);

	return rc;
}
2461 * ata_dev_same_device - Determine whether new ID matches configured device
2462 * @ap: port on which the device to compare against resides
2463 * @dev: device to compare against
2464 * @new_class: class of the new device
2465 * @new_id: IDENTIFY page of the new device
2467 * Compare @new_class and @new_id against @dev and determine
2468 * whether @dev is the device indicated by @new_class and
2475 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2477 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2478 unsigned int new_class, const u16 *new_id)
2480 const u16 *old_id = dev->id;
2481 unsigned char model[2][41], serial[2][21];
2484 if (dev->class != new_class) {
2486 "ata%u: dev %u class mismatch %d != %d\n",
2487 ap->id, dev->devno, dev->class, new_class);
2491 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2492 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2493 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2494 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2495 new_n_sectors = ata_id_n_sectors(new_id);
2497 if (strcmp(model[0], model[1])) {
2499 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2500 ap->id, dev->devno, model[0], model[1]);
2504 if (strcmp(serial[0], serial[1])) {
2506 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2507 ap->id, dev->devno, serial[0], serial[1]);
2511 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2513 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2514 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2515 (unsigned long long)new_n_sectors);
2523 * ata_dev_revalidate - Revalidate ATA device
2524 * @ap: port on which the device to revalidate resides
2525 * @dev: device to revalidate
2526 * @post_reset: is this revalidation after reset?
2528	 * Re-read IDENTIFY page and make sure @dev is still attached to the port.
2532 * Kernel thread context (may sleep)
2535 * 0 on success, negative errno otherwise
2537 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2544 if (!ata_dev_present(dev))
2550 /* allocate & read ID data */
2551 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2555 /* is the device still there? */
2556 if (!ata_dev_same_device(ap, dev, class, id)) {
2564 /* configure device according to the new ID */
2565 return ata_dev_configure(ap, dev, 0);
2568 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2569 ap->id, dev->devno, rc);
2574 static const char * const ata_dma_blacklist [] = {
2575 "WDC AC11000H", NULL,
2576 "WDC AC22100H", NULL,
2577 "WDC AC32500H", NULL,
2578 "WDC AC33100H", NULL,
2579 "WDC AC31600H", NULL,
2580 "WDC AC32100H", "24.09P07",
2581 "WDC AC23200L", "21.10N21",
2582 "Compaq CRD-8241B", NULL,
2587 "SanDisk SDP3B", NULL,
2588 "SanDisk SDP3B-64", NULL,
2589 "SANYO CD-ROM CRD", NULL,
2590 "HITACHI CDR-8", NULL,
2591 "HITACHI CDR-8335", NULL,
2592 "HITACHI CDR-8435", NULL,
2593 "Toshiba CD-ROM XM-6202B", NULL,
2594 "TOSHIBA CD-ROM XM-1702BC", NULL,
2596 "E-IDE CD-ROM CR-840", NULL,
2597 "CD-ROM Drive/F5A", NULL,
2598 "WPI CDD-820", NULL,
2599 "SAMSUNG CD-ROM SC-148C", NULL,
2600 "SAMSUNG CD-ROM SC", NULL,
2601 "SanDisk SDP3B-64", NULL,
2602 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2603 "_NEC DV5800A", NULL,
2604 "SAMSUNG CD-ROM SN-124", "N001"
2607 static int ata_strim(char *s, size_t len)
2609 len = strnlen(s, len);
2611 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2612 while ((len > 0) && (s[len - 1] == ' ')) {
2619 static int ata_dma_blacklisted(const struct ata_device *dev)
2621 unsigned char model_num[40];
2622 unsigned char model_rev[16];
2623 unsigned int nlen, rlen;
2626 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2628 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2630 nlen = ata_strim(model_num, sizeof(model_num));
2631 rlen = ata_strim(model_rev, sizeof(model_rev));
2633 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2634 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2635 if (ata_dma_blacklist[i+1] == NULL)
2637			if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2645 * ata_dev_xfermask - Compute supported xfermask of the given device
2646 * @ap: Port on which the device to compute xfermask for resides
2647 * @dev: Device to compute xfermask for
2649 * Compute supported xfermask of @dev and store it in
2650 * dev->*_mask. This function is responsible for applying all
2651	 * known limits including host controller limits, device blacklist, etc.
2654 * FIXME: The current implementation limits all transfer modes to
2655	 * those supported by the slowest device on the port.  This is not
2656 * required on most controllers.
2661 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2663 struct ata_host_set *hs = ap->host_set;
2664 unsigned long xfer_mask;
2667 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2670 /* FIXME: Use port-wide xfermask for now */
2671 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2672 struct ata_device *d = &ap->device[i];
2673 if (!ata_dev_present(d))
2675 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2677 xfer_mask &= ata_id_xfermask(d->id);
2678 if (ata_dma_blacklisted(d))
2679 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2680 /* Apply cable rule here. Don't apply it early because when
2681 we handle hot plug the cable type can itself change */
2682 if (ap->cbl == ATA_CBL_PATA40)
2683 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2686 if (ata_dma_blacklisted(dev))
2687 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2688 "disabling DMA\n", ap->id, dev->devno);
2690 if (hs->flags & ATA_HOST_SIMPLEX) {
2691 if (hs->simplex_claimed)
2692 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2694 if (ap->ops->mode_filter)
2695 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2697 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2702 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2703 * @ap: Port associated with device @dev
2704 * @dev: Device to which command will be sent
2706 * Issue SET FEATURES - XFER MODE command to device @dev
2710 * PCI/etc. bus probe sem.
2713 * 0 on success, AC_ERR_* mask otherwise.
2716 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2717 struct ata_device *dev)
2719 struct ata_taskfile tf;
2720 unsigned int err_mask;
2722 /* set up set-features taskfile */
2723 DPRINTK("set features - xfer mode\n");
2725 ata_tf_init(ap, &tf, dev->devno);
2726 tf.command = ATA_CMD_SET_FEATURES;
2727 tf.feature = SETFEATURES_XFER;
2728 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2729 tf.protocol = ATA_PROT_NODATA;
2730 tf.nsect = dev->xfer_mode;
2732 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2734 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2739 * ata_dev_init_params - Issue INIT DEV PARAMS command
2740 * @ap: Port associated with device @dev
2741 * @dev: Device to which command will be sent
2744 * Kernel thread context (may sleep)
2747 * 0 on success, AC_ERR_* mask otherwise.
2750 static unsigned int ata_dev_init_params(struct ata_port *ap,
2751 struct ata_device *dev,
2755 struct ata_taskfile tf;
2756 unsigned int err_mask;
2758 /* Number of sectors per track 1-255. Number of heads 1-16 */
2759 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2760 return AC_ERR_INVALID;
2762 /* set up init dev params taskfile */
2763 DPRINTK("init dev params \n");
2765 ata_tf_init(ap, &tf, dev->devno);
2766 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2767 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2768 tf.protocol = ATA_PROT_NODATA;
2770 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2772 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2774 DPRINTK("EXIT, err_mask=%x\n", err_mask);
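/*
 * For reference, a sketch of how this is normally driven: callers take
 * the CHS geometry straight from the IDENTIFY data, where word 3 holds
 * the head count and word 6 the sectors-per-track count:
 *
 *	err_mask = ata_dev_init_params(ap, dev, dev->id[3], dev->id[6]);
 */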
2779 * ata_sg_clean - Unmap DMA memory associated with command
2780 * @qc: Command containing DMA memory to be released
2782 * Unmap all mapped DMA memory associated with this command.
2785 * spin_lock_irqsave(host_set lock)
2788 static void ata_sg_clean(struct ata_queued_cmd *qc)
2790 struct ata_port *ap = qc->ap;
2791 struct scatterlist *sg = qc->__sg;
2792 int dir = qc->dma_dir;
2793 void *pad_buf = NULL;
2795 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2796 WARN_ON(sg == NULL);
2798 if (qc->flags & ATA_QCFLAG_SINGLE)
2799 WARN_ON(qc->n_elem > 1);
2801 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2803	/* if we padded the buffer out to a 32-bit boundary, and data
2804 * xfer direction is from-device, we must copy from the
2805 * pad buffer back into the supplied buffer
2807 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2808 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2810 if (qc->flags & ATA_QCFLAG_SG) {
2812 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2813 /* restore last sg */
2814 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2816 struct scatterlist *psg = &qc->pad_sgent;
2817 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2818 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2819 kunmap_atomic(addr, KM_IRQ0);
2823 dma_unmap_single(ap->dev,
2824 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2827 sg->length += qc->pad_len;
2829 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2830 pad_buf, qc->pad_len);
2833 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2838 * ata_fill_sg - Fill PCI IDE PRD table
2839 * @qc: Metadata associated with taskfile to be transferred
2841 * Fill PCI IDE PRD (scatter-gather) table with segments
2842 * associated with the current disk command.
2845 * spin_lock_irqsave(host_set lock)
2848 static void ata_fill_sg(struct ata_queued_cmd *qc)
2850 struct ata_port *ap = qc->ap;
2851 struct scatterlist *sg;
2854 WARN_ON(qc->__sg == NULL);
2855 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2858 ata_for_each_sg(sg, qc) {
2862 /* determine if physical DMA addr spans 64K boundary.
2863 * Note h/w doesn't support 64-bit, so we unconditionally
2864 * truncate dma_addr_t to u32.
2866 addr = (u32) sg_dma_address(sg);
2867 sg_len = sg_dma_len(sg);
2870 offset = addr & 0xffff;
2872 if ((offset + sg_len) > 0x10000)
2873 len = 0x10000 - offset;
2875 ap->prd[idx].addr = cpu_to_le32(addr);
2876 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2877 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2886 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2889 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2890 * @qc: Metadata associated with taskfile to check
2892 * Allow low-level driver to filter ATA PACKET commands, returning
2893 * a status indicating whether or not it is OK to use DMA for the
2894 * supplied PACKET command.
2897 * spin_lock_irqsave(host_set lock)
2899	 * RETURNS: 0 when ATAPI DMA can be used, nonzero otherwise
2902 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2904 struct ata_port *ap = qc->ap;
2905 int rc = 0; /* Assume ATAPI DMA is OK by default */
2907 if (ap->ops->check_atapi_dma)
2908 rc = ap->ops->check_atapi_dma(qc);
2913 * ata_qc_prep - Prepare taskfile for submission
2914 * @qc: Metadata associated with taskfile to be prepared
2916 * Prepare ATA taskfile for submission.
2919 * spin_lock_irqsave(host_set lock)
2921 void ata_qc_prep(struct ata_queued_cmd *qc)
2923 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2929 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2932 * ata_sg_init_one - Associate command with memory buffer
2933 * @qc: Command to be associated
2934 * @buf: Memory buffer
2935 * @buflen: Length of memory buffer, in bytes.
2937 * Initialize the data-related elements of queued_cmd @qc
2938 * to point to a single memory buffer, @buf of byte length @buflen.
2941 * spin_lock_irqsave(host_set lock)
2944 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2946 struct scatterlist *sg;
2948 qc->flags |= ATA_QCFLAG_SINGLE;
2950 memset(&qc->sgent, 0, sizeof(qc->sgent));
2951 qc->__sg = &qc->sgent;
2953 qc->orig_n_elem = 1;
2957 sg_init_one(sg, buf, buflen);
2961 * ata_sg_init - Associate command with scatter-gather table.
2962 * @qc: Command to be associated
2963 * @sg: Scatter-gather table.
2964 * @n_elem: Number of elements in s/g table.
2966 * Initialize the data-related elements of queued_cmd @qc
2967	 * to point to a scatter-gather table @sg, containing @n_elem elements.
2971 * spin_lock_irqsave(host_set lock)
2974 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2975 unsigned int n_elem)
2977 qc->flags |= ATA_QCFLAG_SG;
2979 qc->n_elem = n_elem;
2980 qc->orig_n_elem = n_elem;
2984 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2985 * @qc: Command with memory buffer to be mapped.
2987 * DMA-map the memory buffer associated with queued_cmd @qc.
2990 * spin_lock_irqsave(host_set lock)
2993 * Zero on success, negative on error.
2996 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2998 struct ata_port *ap = qc->ap;
2999 int dir = qc->dma_dir;
3000 struct scatterlist *sg = qc->__sg;
3001 dma_addr_t dma_address;
3004 /* we must lengthen transfers to end on a 32-bit boundary */
3005 qc->pad_len = sg->length & 3;
3007 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3008 struct scatterlist *psg = &qc->pad_sgent;
3010 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3012 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3014 if (qc->tf.flags & ATA_TFLAG_WRITE)
3015 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3018 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3019 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3021 sg->length -= qc->pad_len;
3022 if (sg->length == 0)
3025 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3026 sg->length, qc->pad_len);
3034 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3036 if (dma_mapping_error(dma_address)) {
3038 sg->length += qc->pad_len;
3042 sg_dma_address(sg) = dma_address;
3043 sg_dma_len(sg) = sg->length;
3046 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3047 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3053 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3054 * @qc: Command with scatter-gather table to be mapped.
3056 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3059 * spin_lock_irqsave(host_set lock)
3062 * Zero on success, negative on error.
3066 static int ata_sg_setup(struct ata_queued_cmd *qc)
3068 struct ata_port *ap = qc->ap;
3069 struct scatterlist *sg = qc->__sg;
3070 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3071 int n_elem, pre_n_elem, dir, trim_sg = 0;
3073 VPRINTK("ENTER, ata%u\n", ap->id);
3074 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3076 /* we must lengthen transfers to end on a 32-bit boundary */
3077 qc->pad_len = lsg->length & 3;
3079 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3080 struct scatterlist *psg = &qc->pad_sgent;
3081 unsigned int offset;
3083 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3085 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3088 * psg->page/offset are used to copy to-be-written
3089 * data in this function or read data in ata_sg_clean.
3091 offset = lsg->offset + lsg->length - qc->pad_len;
3092 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3093 psg->offset = offset_in_page(offset);
3095 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3096 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3097 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3098 kunmap_atomic(addr, KM_IRQ0);
3101 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3102 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3104 lsg->length -= qc->pad_len;
3105 if (lsg->length == 0)
3108 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3109 qc->n_elem - 1, lsg->length, qc->pad_len);
3112 pre_n_elem = qc->n_elem;
3113 if (trim_sg && pre_n_elem)
3122 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3124 /* restore last sg */
3125 lsg->length += qc->pad_len;
3129 DPRINTK("%d sg elements mapped\n", n_elem);
3132 qc->n_elem = n_elem;
3138 * ata_poll_qc_complete - turn irq back on and finish qc
3139 * @qc: Command to complete
3143 * None. (grabs host lock)
3146 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3148 struct ata_port *ap = qc->ap;
3149 unsigned long flags;
3151 spin_lock_irqsave(&ap->host_set->lock, flags);
3152 ap->flags &= ~ATA_FLAG_NOINTR;
3154 ata_qc_complete(qc);
3155 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3159 * ata_pio_poll - poll using PIO, depending on current state
3160 * @ap: the target ata_port
3163 * None. (executing in kernel thread context)
3166 * timeout value to use
3169 static unsigned long ata_pio_poll(struct ata_port *ap)
3171 struct ata_queued_cmd *qc;
3173 unsigned int poll_state = HSM_ST_UNKNOWN;
3174 unsigned int reg_state = HSM_ST_UNKNOWN;
3176 qc = ata_qc_from_tag(ap, ap->active_tag);
3177 WARN_ON(qc == NULL);
3179 switch (ap->hsm_task_state) {
3182 poll_state = HSM_ST_POLL;
3186 case HSM_ST_LAST_POLL:
3187 poll_state = HSM_ST_LAST_POLL;
3188 reg_state = HSM_ST_LAST;
3195 status = ata_chk_status(ap);
3196 if (status & ATA_BUSY) {
3197 if (time_after(jiffies, ap->pio_task_timeout)) {
3198 qc->err_mask |= AC_ERR_TIMEOUT;
3199 ap->hsm_task_state = HSM_ST_TMOUT;
3202 ap->hsm_task_state = poll_state;
3203 return ATA_SHORT_PAUSE;
3206 ap->hsm_task_state = reg_state;
3211 * ata_pio_complete - check if drive is busy or idle
3212 * @ap: the target ata_port
3215 * None. (executing in kernel thread context)
3218 * Non-zero if qc completed, zero otherwise.
3221 static int ata_pio_complete (struct ata_port *ap)
3223 struct ata_queued_cmd *qc;
3227 * This is purely heuristic. This is a fast path. Sometimes when
3228 * we enter, BSY will be cleared in a chk-status or two. If not,
3229 * the drive is probably seeking or something. Snooze for a couple
3230 * msecs, then chk-status again. If still busy, fall back to
3231 * HSM_ST_POLL state.
3233 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3234 if (drv_stat & ATA_BUSY) {
3236 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3237 if (drv_stat & ATA_BUSY) {
3238 ap->hsm_task_state = HSM_ST_LAST_POLL;
3239 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3244 qc = ata_qc_from_tag(ap, ap->active_tag);
3245 WARN_ON(qc == NULL);
3247 drv_stat = ata_wait_idle(ap);
3248 if (!ata_ok(drv_stat)) {
3249 qc->err_mask |= __ac_err_mask(drv_stat);
3250 ap->hsm_task_state = HSM_ST_ERR;
3254 ap->hsm_task_state = HSM_ST_IDLE;
3256 WARN_ON(qc->err_mask);
3257 ata_poll_qc_complete(qc);
3259 /* another command may start at this point */
3266 * swap_buf_le16 - swap halves of 16-bit words in place
3267 * @buf: Buffer to swap
3268 * @buf_words: Number of 16-bit words in buffer.
3270 * Swap halves of 16-bit words if needed to convert from
3271	 * little-endian byte order to native cpu byte order, or vice versa.
3275 * Inherited from caller.
3277 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3282 for (i = 0; i < buf_words; i++)
3283 buf[i] = le16_to_cpu(buf[i]);
3284 #endif /* __BIG_ENDIAN */
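/*
 * Example (sketch): IDENTIFY data arrives from the device as
 * little-endian 16-bit words, so a PIO reader converts the buffer in
 * place before parsing it:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */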
3288 * ata_mmio_data_xfer - Transfer data by MMIO
3289 * @ap: port to read/write
3291 * @buflen: buffer length
3292 * @write_data: read/write
3294 * Transfer data from/to the device data register by MMIO.
3297 * Inherited from caller.
3300 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3301 unsigned int buflen, int write_data)
3304 unsigned int words = buflen >> 1;
3305 u16 *buf16 = (u16 *) buf;
3306 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3308 /* Transfer multiple of 2 bytes */
3310 for (i = 0; i < words; i++)
3311 writew(le16_to_cpu(buf16[i]), mmio);
3313 for (i = 0; i < words; i++)
3314 buf16[i] = cpu_to_le16(readw(mmio));
3317 /* Transfer trailing 1 byte, if any. */
3318 if (unlikely(buflen & 0x01)) {
3319 u16 align_buf[1] = { 0 };
3320 unsigned char *trailing_buf = buf + buflen - 1;
3323 memcpy(align_buf, trailing_buf, 1);
3324 writew(le16_to_cpu(align_buf[0]), mmio);
3326 align_buf[0] = cpu_to_le16(readw(mmio));
3327 memcpy(trailing_buf, align_buf, 1);
3333 * ata_pio_data_xfer - Transfer data by PIO
3334 * @ap: port to read/write
3336 * @buflen: buffer length
3337 * @write_data: read/write
3339 * Transfer data from/to the device data register by PIO.
3342 * Inherited from caller.
3345 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3346 unsigned int buflen, int write_data)
3348 unsigned int words = buflen >> 1;
3350 /* Transfer multiple of 2 bytes */
3352 outsw(ap->ioaddr.data_addr, buf, words);
3354 insw(ap->ioaddr.data_addr, buf, words);
3356 /* Transfer trailing 1 byte, if any. */
3357 if (unlikely(buflen & 0x01)) {
3358 u16 align_buf[1] = { 0 };
3359 unsigned char *trailing_buf = buf + buflen - 1;
3362 memcpy(align_buf, trailing_buf, 1);
3363 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3365 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3366 memcpy(trailing_buf, align_buf, 1);
3372 * ata_data_xfer - Transfer data from/to the data register.
3373 * @ap: port to read/write
3375 * @buflen: buffer length
3376 * @do_write: read/write
3378 * Transfer data from/to the device data register.
3381 * Inherited from caller.
3384 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3385 unsigned int buflen, int do_write)
3387 /* Make the crap hardware pay the costs not the good stuff */
3388 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3389 unsigned long flags;
3390 local_irq_save(flags);
3391 if (ap->flags & ATA_FLAG_MMIO)
3392 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3394 ata_pio_data_xfer(ap, buf, buflen, do_write);
3395 local_irq_restore(flags);
3397 if (ap->flags & ATA_FLAG_MMIO)
3398 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3400 ata_pio_data_xfer(ap, buf, buflen, do_write);
3405 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3406 * @qc: Command on going
3408 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3411 * Inherited from caller.
3414 static void ata_pio_sector(struct ata_queued_cmd *qc)
3416 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3417 struct scatterlist *sg = qc->__sg;
3418 struct ata_port *ap = qc->ap;
3420 unsigned int offset;
3423 if (qc->cursect == (qc->nsect - 1))
3424 ap->hsm_task_state = HSM_ST_LAST;
3426 page = sg[qc->cursg].page;
3427 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3429 /* get the current page and offset */
3430 page = nth_page(page, (offset >> PAGE_SHIFT));
3431 offset %= PAGE_SIZE;
3433 buf = kmap(page) + offset;
3438 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3443 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3445 /* do the actual data transfer */
3446 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3447 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3453 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3454 * @qc: Command on going
3455 * @bytes: number of bytes
3457	 * Transfer data from/to the ATAPI device.
3460 * Inherited from caller.
3464 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3466 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3467 struct scatterlist *sg = qc->__sg;
3468 struct ata_port *ap = qc->ap;
3471 unsigned int offset, count;
3473 if (qc->curbytes + bytes >= qc->nbytes)
3474 ap->hsm_task_state = HSM_ST_LAST;
3477 if (unlikely(qc->cursg >= qc->n_elem)) {
3479 * The end of qc->sg is reached and the device expects
3480 * more data to transfer. In order not to overrun qc->sg
3481 * and fulfill length specified in the byte count register,
3482 * - for read case, discard trailing data from the device
3483		 * - for write case, pad zero data to the device
3485 u16 pad_buf[1] = { 0 };
3486 unsigned int words = bytes >> 1;
3489 if (words) /* warning if bytes > 1 */
3490 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3493 for (i = 0; i < words; i++)
3494 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3496 ap->hsm_task_state = HSM_ST_LAST;
3500 sg = &qc->__sg[qc->cursg];
3503 offset = sg->offset + qc->cursg_ofs;
3505 /* get the current page and offset */
3506 page = nth_page(page, (offset >> PAGE_SHIFT));
3507 offset %= PAGE_SIZE;
3509 /* don't overrun current sg */
3510 count = min(sg->length - qc->cursg_ofs, bytes);
3512 /* don't cross page boundaries */
3513 count = min(count, (unsigned int)PAGE_SIZE - offset);
3515 buf = kmap(page) + offset;
3518 qc->curbytes += count;
3519 qc->cursg_ofs += count;
3521 if (qc->cursg_ofs == sg->length) {
3526 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3528 /* do the actual data transfer */
3529 ata_data_xfer(ap, buf, count, do_write);
3538 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3539 * @qc: Command on going
3541	 * Transfer data from/to the ATAPI device.
3544 * Inherited from caller.
3547 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3549 struct ata_port *ap = qc->ap;
3550 struct ata_device *dev = qc->dev;
3551 unsigned int ireason, bc_lo, bc_hi, bytes;
3552 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3554 ap->ops->tf_read(ap, &qc->tf);
3555 ireason = qc->tf.nsect;
3556 bc_lo = qc->tf.lbam;
3557 bc_hi = qc->tf.lbah;
3558 bytes = (bc_hi << 8) | bc_lo;
3560 /* shall be cleared to zero, indicating xfer of data */
3561 if (ireason & (1 << 0))
3564 /* make sure transfer direction matches expected */
3565 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3566 if (do_write != i_write)
3569 __atapi_pio_bytes(qc, bytes);
3574 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3575 ap->id, dev->devno);
3576 qc->err_mask |= AC_ERR_HSM;
3577 ap->hsm_task_state = HSM_ST_ERR;
3581 * ata_pio_block - start PIO on a block
3582 * @ap: the target ata_port
3585 * None. (executing in kernel thread context)
3588 static void ata_pio_block(struct ata_port *ap)
3590 struct ata_queued_cmd *qc;
3594 * This is purely heuristic. This is a fast path.
3595 * Sometimes when we enter, BSY will be cleared in
3596 * a chk-status or two. If not, the drive is probably seeking
3597 * or something. Snooze for a couple msecs, then
3598 * chk-status again. If still busy, fall back to
3599 * HSM_ST_POLL state.
3601 status = ata_busy_wait(ap, ATA_BUSY, 5);
3602 if (status & ATA_BUSY) {
3604 status = ata_busy_wait(ap, ATA_BUSY, 10);
3605 if (status & ATA_BUSY) {
3606 ap->hsm_task_state = HSM_ST_POLL;
3607 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3612 qc = ata_qc_from_tag(ap, ap->active_tag);
3613 WARN_ON(qc == NULL);
3616 if (status & (ATA_ERR | ATA_DF)) {
3617 qc->err_mask |= AC_ERR_DEV;
3618 ap->hsm_task_state = HSM_ST_ERR;
3622 /* transfer data if any */
3623 if (is_atapi_taskfile(&qc->tf)) {
3624 /* DRQ=0 means no more data to transfer */
3625 if ((status & ATA_DRQ) == 0) {
3626 ap->hsm_task_state = HSM_ST_LAST;
3630 atapi_pio_bytes(qc);
3632 /* handle BSY=0, DRQ=0 as error */
3633 if ((status & ATA_DRQ) == 0) {
3634 qc->err_mask |= AC_ERR_HSM;
3635 ap->hsm_task_state = HSM_ST_ERR;
3643 static void ata_pio_error(struct ata_port *ap)
3645 struct ata_queued_cmd *qc;
3647 qc = ata_qc_from_tag(ap, ap->active_tag);
3648 WARN_ON(qc == NULL);
3650 if (qc->tf.command != ATA_CMD_PACKET)
3651 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3653 /* make sure qc->err_mask is available to
3654 * know what's wrong and recover
3656 WARN_ON(qc->err_mask == 0);
3658 ap->hsm_task_state = HSM_ST_IDLE;
3660 ata_poll_qc_complete(qc);
3663 static void ata_pio_task(void *_data)
3665 struct ata_port *ap = _data;
3666 unsigned long timeout;
3673 switch (ap->hsm_task_state) {
3682 qc_completed = ata_pio_complete(ap);
3686 case HSM_ST_LAST_POLL:
3687 timeout = ata_pio_poll(ap);
3697 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3698 else if (!qc_completed)
3703 * atapi_packet_task - Write CDB bytes to hardware
3704 * @_data: Port to which ATAPI device is attached.
3706	 * When the device has indicated its readiness to accept
3707 * a CDB, this function is called. Send the CDB.
3708 * If DMA is to be performed, exit immediately.
3709 * Otherwise, we are in polling mode, so poll
3710	 * status until the operation succeeds or fails.
3713 * Kernel thread context (may sleep)
3716 static void atapi_packet_task(void *_data)
3718 struct ata_port *ap = _data;
3719 struct ata_queued_cmd *qc;
3722 qc = ata_qc_from_tag(ap, ap->active_tag);
3723 WARN_ON(qc == NULL);
3724 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3726 /* sleep-wait for BSY to clear */
3727 DPRINTK("busy wait\n");
3728 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3729 qc->err_mask |= AC_ERR_TIMEOUT;
3733 /* make sure DRQ is set */
3734 status = ata_chk_status(ap);
3735 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3736 qc->err_mask |= AC_ERR_HSM;
3741 DPRINTK("send cdb\n");
3742 WARN_ON(qc->dev->cdb_len < 12);
3744 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3745 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3746 unsigned long flags;
3748 /* Once we're done issuing command and kicking bmdma,
3749 * irq handler takes over. To not lose irq, we need
3750 * to clear NOINTR flag before sending cdb, but
3751 * interrupt handler shouldn't be invoked before we're
3752 * finished. Hence, the following locking.
3754 spin_lock_irqsave(&ap->host_set->lock, flags);
3755 ap->flags &= ~ATA_FLAG_NOINTR;
3756 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3757 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3758 ap->ops->bmdma_start(qc); /* initiate bmdma */
3759 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3761 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3763 /* PIO commands are handled by polling */
3764 ap->hsm_task_state = HSM_ST;
3765 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3771 ata_poll_qc_complete(qc);
3775 * ata_qc_timeout - Handle timeout of queued command
3776 * @qc: Command that timed out
3778 * Some part of the kernel (currently, only the SCSI layer)
3779 * has noticed that the active command on port @ap has not
3780 * completed after a specified length of time. Handle this
3781 * condition by disabling DMA (if necessary) and completing
3782 * transactions, with error if necessary.
3784 * This also handles the case of the "lost interrupt", where
3785 * for some reason (possibly hardware bug, possibly driver bug)
3786 * an interrupt was not delivered to the driver, even though the
3787 * transaction completed successfully.
3790 * Inherited from SCSI layer (none, can sleep)
3793 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3795 struct ata_port *ap = qc->ap;
3796 struct ata_host_set *host_set = ap->host_set;
3797 u8 host_stat = 0, drv_stat;
3798 unsigned long flags;
3802 ap->hsm_task_state = HSM_ST_IDLE;
3804 spin_lock_irqsave(&host_set->lock, flags);
3806 switch (qc->tf.protocol) {
3809 case ATA_PROT_ATAPI_DMA:
3810 host_stat = ap->ops->bmdma_status(ap);
3812 /* before we do anything else, clear DMA-Start bit */
3813 ap->ops->bmdma_stop(qc);
3819 drv_stat = ata_chk_status(ap);
3821 /* ack bmdma irq events */
3822 ap->ops->irq_clear(ap);
3824 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3825 ap->id, qc->tf.command, drv_stat, host_stat);
3827 /* complete taskfile transaction */
3828 qc->err_mask |= ac_err_mask(drv_stat);
3832 spin_unlock_irqrestore(&host_set->lock, flags);
3834 ata_eh_qc_complete(qc);
3840 * ata_eng_timeout - Handle timeout of queued command
3841 * @ap: Port on which timed-out command is active
3843 * Some part of the kernel (currently, only the SCSI layer)
3844 * has noticed that the active command on port @ap has not
3845 * completed after a specified length of time. Handle this
3846 * condition by disabling DMA (if necessary) and completing
3847 * transactions, with error if necessary.
3849 * This also handles the case of the "lost interrupt", where
3850 * for some reason (possibly hardware bug, possibly driver bug)
3851 * an interrupt was not delivered to the driver, even though the
3852 * transaction completed successfully.
3855 * Inherited from SCSI layer (none, can sleep)
3858 void ata_eng_timeout(struct ata_port *ap)
3862 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3868 * ata_qc_new - Request an available ATA command, for queueing
3869	 * @ap: Port from which to request an available command structure
3876 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3878 struct ata_queued_cmd *qc = NULL;
3881 for (i = 0; i < ATA_MAX_QUEUE; i++)
3882 if (!test_and_set_bit(i, &ap->qactive)) {
3883 qc = ata_qc_from_tag(ap, i);
3894 * ata_qc_new_init - Request an available ATA command, and initialize it
3895 * @ap: Port associated with device @dev
3896	 * @dev: Device for which we request an available command structure
3902 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3903 struct ata_device *dev)
3905 struct ata_queued_cmd *qc;
3907 qc = ata_qc_new(ap);
3920 * ata_qc_free - free unused ata_queued_cmd
3921 * @qc: Command to complete
3923 * Designed to free unused ata_queued_cmd object
3924 * in case something prevents using it.
3927 * spin_lock_irqsave(host_set lock)
3929 void ata_qc_free(struct ata_queued_cmd *qc)
3931 struct ata_port *ap = qc->ap;
3934 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3938 if (likely(ata_tag_valid(tag))) {
3939 if (tag == ap->active_tag)
3940 ap->active_tag = ATA_TAG_POISON;
3941 qc->tag = ATA_TAG_POISON;
3942 clear_bit(tag, &ap->qactive);
3946 void __ata_qc_complete(struct ata_queued_cmd *qc)
3948 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3949 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3951 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3954 /* atapi: mark qc as inactive to prevent the interrupt handler
3955 * from completing the command twice later, before the error handler
3956 * is called. (when rc != 0 and atapi request sense is needed)
3958 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3960 /* call completion callback */
3961 qc->complete_fn(qc);
3964 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3966 struct ata_port *ap = qc->ap;
3968 switch (qc->tf.protocol) {
3970 case ATA_PROT_ATAPI_DMA:
3973 case ATA_PROT_ATAPI:
3975 if (ap->flags & ATA_FLAG_PIO_DMA)
3988 * ata_qc_issue - issue taskfile to device
3989 * @qc: command to issue to device
3991	 * Prepare an ATA command for submission to the device.
3992 * This includes mapping the data into a DMA-able
3993 * area, filling in the S/G table, and finally
3994 * writing the taskfile to hardware, starting the command.
3997 * spin_lock_irqsave(host_set lock)
3999 void ata_qc_issue(struct ata_queued_cmd *qc)
4001 struct ata_port *ap = qc->ap;
4003 qc->ap->active_tag = qc->tag;
4004 qc->flags |= ATA_QCFLAG_ACTIVE;
4006 if (ata_should_dma_map(qc)) {
4007 if (qc->flags & ATA_QCFLAG_SG) {
4008 if (ata_sg_setup(qc))
4010 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4011 if (ata_sg_setup_one(qc))
4015 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4018 ap->ops->qc_prep(qc);
4020 qc->err_mask |= ap->ops->qc_issue(qc);
4021 if (unlikely(qc->err_mask))
4026 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4027 qc->err_mask |= AC_ERR_SYSTEM;
4029 ata_qc_complete(qc);
4033 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4034 * @qc: command to issue to device
4036 * Using various libata functions and hooks, this function
4037 * starts an ATA command. ATA commands are grouped into
4038 * classes called "protocols", and issuing each type of protocol
4039 * is slightly different.
4041 * May be used as the qc_issue() entry in ata_port_operations.
4044 * spin_lock_irqsave(host_set lock)
4047 * Zero on success, AC_ERR_* mask on failure
4050 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4052 struct ata_port *ap = qc->ap;
4054 ata_dev_select(ap, qc->dev->devno, 1, 0);
4056 switch (qc->tf.protocol) {
4057 case ATA_PROT_NODATA:
4058 ata_tf_to_host(ap, &qc->tf);
4062 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4063 ap->ops->bmdma_setup(qc); /* set up bmdma */
4064 ap->ops->bmdma_start(qc); /* initiate bmdma */
4067 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4068 ata_qc_set_polling(qc);
4069 ata_tf_to_host(ap, &qc->tf);
4070 ap->hsm_task_state = HSM_ST;
4071 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4074 case ATA_PROT_ATAPI:
4075 ata_qc_set_polling(qc);
4076 ata_tf_to_host(ap, &qc->tf);
4077 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4080 case ATA_PROT_ATAPI_NODATA:
4081 ap->flags |= ATA_FLAG_NOINTR;
4082 ata_tf_to_host(ap, &qc->tf);
4083 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4086 case ATA_PROT_ATAPI_DMA:
4087 ap->flags |= ATA_FLAG_NOINTR;
4088 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4089 ap->ops->bmdma_setup(qc); /* set up bmdma */
4090 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4095 return AC_ERR_SYSTEM;
4102 * ata_host_intr - Handle host interrupt for given (port, task)
4103 * @ap: Port on which interrupt arrived (possibly...)
4104 * @qc: Taskfile currently active in engine
4106 * Handle host interrupt for given queued command. Currently,
4107 * only DMA interrupts are handled. All other commands are
4108 * handled via polling with interrupts disabled (nIEN bit).
4111 * spin_lock_irqsave(host_set lock)
4114 * One if interrupt was handled, zero if not (shared irq).
4117 inline unsigned int ata_host_intr (struct ata_port *ap,
4118 struct ata_queued_cmd *qc)
4120 u8 status, host_stat;
4122 switch (qc->tf.protocol) {
4125 case ATA_PROT_ATAPI_DMA:
4126 case ATA_PROT_ATAPI:
4127 /* check status of DMA engine */
4128 host_stat = ap->ops->bmdma_status(ap);
4129 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4131 /* if it's not our irq... */
4132 if (!(host_stat & ATA_DMA_INTR))
4135 /* before we do anything else, clear DMA-Start bit */
4136 ap->ops->bmdma_stop(qc);
4140 case ATA_PROT_ATAPI_NODATA:
4141 case ATA_PROT_NODATA:
4142 /* check altstatus */
4143 status = ata_altstatus(ap);
4144 if (status & ATA_BUSY)
4147 /* check main status, clearing INTRQ */
4148 status = ata_chk_status(ap);
4149 if (unlikely(status & ATA_BUSY))
4151 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4152 ap->id, qc->tf.protocol, status);
4154 /* ack bmdma irq events */
4155 ap->ops->irq_clear(ap);
4157 /* complete taskfile transaction */
4158 qc->err_mask |= ac_err_mask(status);
4159 ata_qc_complete(qc);
4166 return 1; /* irq handled */
4169 ap->stats.idle_irq++;
4172 if ((ap->stats.idle_irq % 1000) == 0) {
4173 ata_irq_ack(ap, 0); /* debug trap */
4174 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4178 return 0; /* irq not handled */
4182 * ata_interrupt - Default ATA host interrupt handler
4183 * @irq: irq line (unused)
4184 * @dev_instance: pointer to our ata_host_set information structure
4187 * Default interrupt handler for PCI IDE devices. Calls
4188 * ata_host_intr() for each port that is not disabled.
4191 * Obtains host_set lock during operation.
4194 * IRQ_NONE or IRQ_HANDLED.
4197 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4199 struct ata_host_set *host_set = dev_instance;
4201 unsigned int handled = 0;
4202 unsigned long flags;
4204 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4205 spin_lock_irqsave(&host_set->lock, flags);
4207 for (i = 0; i < host_set->n_ports; i++) {
4208 struct ata_port *ap;
4210 ap = host_set->ports[i];
4212 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4213 struct ata_queued_cmd *qc;
4215 qc = ata_qc_from_tag(ap, ap->active_tag);
4216 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4217 (qc->flags & ATA_QCFLAG_ACTIVE))
4218 handled |= ata_host_intr(ap, qc);
4222 spin_unlock_irqrestore(&host_set->lock, flags);
4224 return IRQ_RETVAL(handled);
4229	 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4230	 * without filling in any other registers
4232 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4235 struct ata_taskfile tf;
4238 ata_tf_init(ap, &tf, dev->devno);
4241 tf.flags |= ATA_TFLAG_DEVICE;
4242 tf.protocol = ATA_PROT_NODATA;
4244 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4246 printk(KERN_ERR "%s: ata command failed: %d\n",
4252 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4256 if (!ata_try_flush_cache(dev))
4259 if (ata_id_has_flush_ext(dev->id))
4260 cmd = ATA_CMD_FLUSH_EXT;
4262 cmd = ATA_CMD_FLUSH;
4264 return ata_do_simple_cmd(ap, dev, cmd);
4267 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4269 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4272 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4274 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4278	 * ata_device_resume - wake up a previously suspended device
4279 * @ap: port the device is connected to
4280 * @dev: the device to resume
4282	 * Kick the drive back into action by sending it an idle immediate
4283	 * command and making sure its transfer mode matches between drive and host.
4287 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4289 if (ap->flags & ATA_FLAG_SUSPENDED) {
4290 ap->flags &= ~ATA_FLAG_SUSPENDED;
4293 if (!ata_dev_present(dev))
4295 if (dev->class == ATA_DEV_ATA)
4296 ata_start_drive(ap, dev);
4302 * ata_device_suspend - prepare a device for suspend
4303 * @ap: port the device is connected to
4304 * @dev: the device to suspend
4306 * Flush the cache on the drive, if appropriate, then issue a
4307 * standbynow command.
4309 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4311 if (!ata_dev_present(dev))
4313 if (dev->class == ATA_DEV_ATA)
4314 ata_flush_cache(ap, dev);
4316 if (state.event != PM_EVENT_FREEZE)
4317 ata_standby_drive(ap, dev);
4318 ap->flags |= ATA_FLAG_SUSPENDED;
4323 * ata_port_start - Set port up for dma.
4324 * @ap: Port to initialize
4326 * Called just after data structures for each port are
4327 * initialized. Allocates space for PRD table.
4329 * May be used as the port_start() entry in ata_port_operations.
4332 * Inherited from caller.
4335 int ata_port_start (struct ata_port *ap)
4337 struct device *dev = ap->dev;
4340 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4344 rc = ata_pad_alloc(ap, dev);
4346 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4350 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4357 * ata_port_stop - Undo ata_port_start()
4358 * @ap: Port to shut down
4360 * Frees the PRD table.
4362 * May be used as the port_stop() entry in ata_port_operations.
4365 * Inherited from caller.
4368 void ata_port_stop (struct ata_port *ap)
4370 struct device *dev = ap->dev;
4372 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4373 ata_pad_free(ap, dev);
4376 void ata_host_stop (struct ata_host_set *host_set)
4378 if (host_set->mmio_base)
4379 iounmap(host_set->mmio_base);
4384 * ata_host_remove - Unregister SCSI host structure with upper layers
4385 * @ap: Port to unregister
4386 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4389 * Inherited from caller.
4392 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4394 struct Scsi_Host *sh = ap->host;
4399 scsi_remove_host(sh);
4401 ap->ops->port_stop(ap);
4405 * ata_host_init - Initialize an ata_port structure
4406 * @ap: Structure to initialize
4407 * @host: associated SCSI mid-layer structure
4408 * @host_set: Collection of hosts to which @ap belongs
4409 * @ent: Probe information provided by low-level driver
4410 * @port_no: Port number associated with this ata_port
4412 * Initialize a new ata_port structure, and its associated
4416 * Inherited from caller.
4419 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4420 struct ata_host_set *host_set,
4421 const struct ata_probe_ent *ent, unsigned int port_no)
4427 host->max_channel = 1;
4428 host->unique_id = ata_unique_id++;
4429 host->max_cmd_len = 12;
4431 ap->flags = ATA_FLAG_PORT_DISABLED;
4432 ap->id = host->unique_id;
4434 ap->ctl = ATA_DEVCTL_OBS;
4435 ap->host_set = host_set;
4437 ap->port_no = port_no;
4439 ent->legacy_mode ? ent->hard_port_no : port_no;
4440 ap->pio_mask = ent->pio_mask;
4441 ap->mwdma_mask = ent->mwdma_mask;
4442 ap->udma_mask = ent->udma_mask;
4443 ap->flags |= ent->host_flags;
4444 ap->ops = ent->port_ops;
4445 ap->cbl = ATA_CBL_NONE;
4446 ap->active_tag = ATA_TAG_POISON;
4447 ap->last_ctl = 0xFF;
4449 INIT_WORK(&ap->port_task, NULL, NULL);
4450 INIT_LIST_HEAD(&ap->eh_done_q);
4452 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4453 struct ata_device *dev = &ap->device[i];
4455 dev->pio_mask = UINT_MAX;
4456 dev->mwdma_mask = UINT_MAX;
4457 dev->udma_mask = UINT_MAX;
4461 ap->stats.unhandled_irq = 1;
4462 ap->stats.idle_irq = 1;
4465 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4469 * ata_host_add - Attach low-level ATA driver to system
4470 * @ent: Information provided by low-level driver
4471 * @host_set: Collections of ports to which we add
4472 * @port_no: Port number associated with this host
4474 * Attach low-level ATA driver to system.
4477 * PCI/etc. bus probe sem.
4480	 * New ata_port on success, NULL on error.
4483 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4484 struct ata_host_set *host_set,
4485 unsigned int port_no)
4487 struct Scsi_Host *host;
4488 struct ata_port *ap;
4493 if (!ent->port_ops->probe_reset &&
4494 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4495 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4500 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4504 host->transportt = &ata_scsi_transport_template;
4506 ap = (struct ata_port *) &host->hostdata[0];
4508 ata_host_init(ap, host, host_set, ent, port_no);
4510 rc = ap->ops->port_start(ap);
4517 scsi_host_put(host);
4522 * ata_device_add - Register hardware device with ATA and SCSI layers
4523 * @ent: Probe information describing hardware device to be registered
4525 * This function processes the information provided in the probe
4526 * information struct @ent, allocates the necessary ATA and SCSI
4527 * host information structures, initializes them, and registers
4528 * everything with requisite kernel subsystems.
4530 * This function requests irqs, probes the ATA bus, and probes
4534 * PCI/etc. bus probe sem.
4537 * Number of ports registered. Zero on error (no ports registered).
4540 int ata_device_add(const struct ata_probe_ent *ent)
4542 unsigned int count = 0, i;
4543 struct device *dev = ent->dev;
4544 struct ata_host_set *host_set;
4547 /* alloc a container for our list of ATA ports (buses) */
4548 host_set = kzalloc(sizeof(struct ata_host_set) +
4549 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4552 spin_lock_init(&host_set->lock);
4554 host_set->dev = dev;
4555 host_set->n_ports = ent->n_ports;
4556 host_set->irq = ent->irq;
4557 host_set->mmio_base = ent->mmio_base;
4558 host_set->private_data = ent->private_data;
4559 host_set->ops = ent->port_ops;
4560 host_set->flags = ent->host_set_flags;
4562 /* register each port bound to this device */
4563 for (i = 0; i < ent->n_ports; i++) {
4564 struct ata_port *ap;
4565 unsigned long xfer_mode_mask;
4567 ap = ata_host_add(ent, host_set, i);
4571 host_set->ports[i] = ap;
4572		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4573 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4574 (ap->pio_mask << ATA_SHIFT_PIO);
4576 /* print per-port info to dmesg */
4577 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4578 "bmdma 0x%lX irq %lu\n",
4580 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4581 ata_mode_string(xfer_mode_mask),
4582 ap->ioaddr.cmd_addr,
4583 ap->ioaddr.ctl_addr,
4584 ap->ioaddr.bmdma_addr,
4588 host_set->ops->irq_clear(ap);
4595 /* obtain irq, that is shared between channels */
4596 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4597 DRV_NAME, host_set))
4600 /* perform each probe synchronously */
4601 DPRINTK("probe begin\n");
4602 for (i = 0; i < count; i++) {
4603 struct ata_port *ap;
4606 ap = host_set->ports[i];
4608 DPRINTK("ata%u: bus probe begin\n", ap->id);
4609 rc = ata_bus_probe(ap);
4610 DPRINTK("ata%u: bus probe end\n", ap->id);
4613 /* FIXME: do something useful here?
4614 * Current libata behavior will
4615 * tear down everything when
4616 * the module is removed
4617 * or the h/w is unplugged.
4621 rc = scsi_add_host(ap->host, dev);
4623 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4625 /* FIXME: do something useful here */
4626 /* FIXME: handle unconditional calls to
4627 * scsi_scan_host and ata_host_remove, below,
4633 /* probes are done, now scan each port's disk(s) */
4634 DPRINTK("host probe begin\n");
4635 for (i = 0; i < count; i++) {
4636 struct ata_port *ap = host_set->ports[i];
4638 ata_scsi_scan_host(ap);
4641 dev_set_drvdata(dev, host_set);
4643 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4644 return ent->n_ports; /* success */
4647 for (i = 0; i < count; i++) {
4648 ata_host_remove(host_set->ports[i], 1);
4649 scsi_host_put(host_set->ports[i]->host);
4653 VPRINTK("EXIT, returning 0\n");
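/*
 * Sketch of the usual call path in a low-level driver (field values
 * and names such as my_port_ops are hypothetical):
 *
 *	probe_ent->n_ports  = 2;
 *	probe_ent->irq      = pdev->irq;
 *	probe_ent->port_ops = &my_port_ops;
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 *	kfree(probe_ent);
 *
 * ata_pci_init_one() follows essentially this pattern for PCI hosts.
 */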
4658 * ata_host_set_remove - PCI layer callback for device removal
4659 * @host_set: ATA host set that was removed
4661	 * Unregister all objects associated with this host set. Free those objects.
4665 * Inherited from calling layer (may sleep).
4668 void ata_host_set_remove(struct ata_host_set *host_set)
4670 struct ata_port *ap;
4673 for (i = 0; i < host_set->n_ports; i++) {
4674 ap = host_set->ports[i];
4675 scsi_remove_host(ap->host);
4678 free_irq(host_set->irq, host_set);
4680 for (i = 0; i < host_set->n_ports; i++) {
4681 ap = host_set->ports[i];
4683 ata_scsi_release(ap->host);
4685 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4686 struct ata_ioports *ioaddr = &ap->ioaddr;
4688 if (ioaddr->cmd_addr == 0x1f0)
4689 release_region(0x1f0, 8);
4690 else if (ioaddr->cmd_addr == 0x170)
4691 release_region(0x170, 8);
4694 scsi_host_put(ap->host);
4697 if (host_set->ops->host_stop)
4698 host_set->ops->host_stop(host_set);
4704 * ata_scsi_release - SCSI layer callback hook for host unload
4705 * @host: libata host to be unloaded
4707 * Performs all duties necessary to shut down a libata port...
4708 * Kill port kthread, disable port, and release resources.
4711 * Inherited from SCSI layer.
4717 int ata_scsi_release(struct Scsi_Host *host)
4719 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4724 ap->ops->port_disable(ap);
4725 ata_host_remove(ap, 0);
4726 for (i = 0; i < ATA_MAX_DEVICES; i++)
4727 kfree(ap->device[i].id);
4734 * ata_std_ports - initialize ioaddr with standard port offsets.
4735 * @ioaddr: IO address structure to be initialized
4737 * Utility function which initializes data_addr, error_addr,
4738 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4739 * device_addr, status_addr, and command_addr to standard offsets
4740 * relative to cmd_addr.
4742 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4745 void ata_std_ports(struct ata_ioports *ioaddr)
4747 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4748 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4749 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4750 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4751 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4752 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4753 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4754 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4755 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4756 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
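/*
 * Example (sketch): a driver for the legacy primary channel points
 * cmd_addr and ctl_addr at the well-known I/O ports and lets
 * ata_std_ports() derive the rest:
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */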
4762 void ata_pci_host_stop (struct ata_host_set *host_set)
4764 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4766 pci_iounmap(pdev, host_set->mmio_base);
4770 * ata_pci_remove_one - PCI layer callback for device removal
4771 * @pdev: PCI device that was removed
4773 * PCI layer indicates to libata via this hook that
4774 * hot-unplug or module unload event has occurred.
4775 * Handle this by unregistering all objects associated
4776 * with this PCI device. Free those objects. Then finally
4777 * release PCI resources and disable device.
4780 * Inherited from PCI layer (may sleep).
4783 void ata_pci_remove_one (struct pci_dev *pdev)
4785 struct device *dev = pci_dev_to_dev(pdev);
4786 struct ata_host_set *host_set = dev_get_drvdata(dev);
4788 ata_host_set_remove(host_set);
4789 pci_release_regions(pdev);
4790 pci_disable_device(pdev);
4791 dev_set_drvdata(dev, NULL);
4794 /* move to PCI subsystem */
4795 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4797 unsigned long tmp = 0;
4799 switch (bits->width) {
4802 pci_read_config_byte(pdev, bits->reg, &tmp8);
4808 pci_read_config_word(pdev, bits->reg, &tmp16);
4814 pci_read_config_dword(pdev, bits->reg, &tmp32);
4825 return (tmp == bits->val) ? 1 : 0;
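/*
 * Example (sketch, in the style of ata_piix): test a single BIOS
 * port-enable bit in PCI config space; the register and bit values
 * below are illustrative only:
 *
 *	static const struct pci_bits enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &enable_bits))
 *		return 0;	// port disabled by firmware
 */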
4828 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4830 pci_save_state(pdev);
4831 pci_disable_device(pdev);
4832 pci_set_power_state(pdev, PCI_D3hot);
4836 int ata_pci_device_resume(struct pci_dev *pdev)
4838 pci_set_power_state(pdev, PCI_D0);
4839 pci_restore_state(pdev);
4840 pci_enable_device(pdev);
4841 pci_set_master(pdev);
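/*
 * Sketch: a PCI low-level driver can plug these helpers straight into
 * its pci_driver; the my_* names and ID table are hypothetical:
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= my_pci_ids,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */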
4844 #endif /* CONFIG_PCI */
4847 static int __init ata_init(void)
4849 ata_wq = create_workqueue("ata");
4853 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4857 static void __exit ata_exit(void)
4859 destroy_workqueue(ata_wq);
4862 module_init(ata_init);
4863 module_exit(ata_exit);
4865 static unsigned long ratelimit_time;
4866 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4868 int ata_ratelimit(void)
4871 unsigned long flags;
4873 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4875 if (time_after(jiffies, ratelimit_time)) {
4877 ratelimit_time = jiffies + (HZ/5);
4881 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
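/*
 * Typical use (sketch): throttle a diagnostic that may fire at
 * interrupt rate:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */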
4887 * libata is essentially a library of internal helper functions for
4888 * low-level ATA host controller drivers. As such, the API/ABI is
4889 * likely to change as new drivers are added and updated.
4890 * Do not depend on ABI/API stability.
4893 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4894 EXPORT_SYMBOL_GPL(ata_std_ports);
4895 EXPORT_SYMBOL_GPL(ata_device_add);
4896 EXPORT_SYMBOL_GPL(ata_host_set_remove);
4897 EXPORT_SYMBOL_GPL(ata_sg_init);
4898 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4899 EXPORT_SYMBOL_GPL(__ata_qc_complete);
4900 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4901 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4902 EXPORT_SYMBOL_GPL(ata_tf_load);
4903 EXPORT_SYMBOL_GPL(ata_tf_read);
4904 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4905 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4906 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4907 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4908 EXPORT_SYMBOL_GPL(ata_check_status);
4909 EXPORT_SYMBOL_GPL(ata_altstatus);
4910 EXPORT_SYMBOL_GPL(ata_exec_command);
4911 EXPORT_SYMBOL_GPL(ata_port_start);
4912 EXPORT_SYMBOL_GPL(ata_port_stop);
4913 EXPORT_SYMBOL_GPL(ata_host_stop);
4914 EXPORT_SYMBOL_GPL(ata_interrupt);
4915 EXPORT_SYMBOL_GPL(ata_qc_prep);
4916 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4917 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4918 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4919 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4920 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4921 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4922 EXPORT_SYMBOL_GPL(ata_port_probe);
4923 EXPORT_SYMBOL_GPL(sata_phy_reset);
4924 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4925 EXPORT_SYMBOL_GPL(ata_bus_reset);
4926 EXPORT_SYMBOL_GPL(ata_std_probeinit);
4927 EXPORT_SYMBOL_GPL(ata_std_softreset);
4928 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4929 EXPORT_SYMBOL_GPL(ata_std_postreset);
4930 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4931 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4932 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4933 EXPORT_SYMBOL_GPL(ata_dev_classify);
4934 EXPORT_SYMBOL_GPL(ata_dev_pair);
4935 EXPORT_SYMBOL_GPL(ata_port_disable);
4936 EXPORT_SYMBOL_GPL(ata_ratelimit);
4937 EXPORT_SYMBOL_GPL(ata_busy_sleep);
4938 EXPORT_SYMBOL_GPL(ata_port_queue_task);
4939 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4940 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4941 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4942 EXPORT_SYMBOL_GPL(ata_scsi_release);
4943 EXPORT_SYMBOL_GPL(ata_host_intr);
4944 EXPORT_SYMBOL_GPL(ata_id_string);
4945 EXPORT_SYMBOL_GPL(ata_id_c_string);
4946 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4947 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4948 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4950 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4951 EXPORT_SYMBOL_GPL(ata_timing_compute);
4952 EXPORT_SYMBOL_GPL(ata_timing_merge);
4955 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4956 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4957 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4958 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4959 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4960 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
4961 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
4962 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
4963 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
4964 #endif /* CONFIG_PCI */
4966 EXPORT_SYMBOL_GPL(ata_device_suspend);
4967 EXPORT_SYMBOL_GPL(ata_device_resume);
4968 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4969 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);