2 * libata-eh.c - libata error handling
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <scsi/scsi.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_eh.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_cmnd.h>
42 #include "scsi_transport_api.h"
44 #include <linux/libata.h>
48 static void __ata_port_freeze(struct ata_port *ap);
49 static void ata_eh_finish(struct ata_port *ap);
51 static void ata_ering_record(struct ata_ering *ering, int is_io,
52 unsigned int err_mask)
54 struct ata_ering_entry *ent;
59 ering->cursor %= ATA_ERING_SIZE;
61 ent = &ering->ring[ering->cursor];
63 ent->err_mask = err_mask;
64 ent->timestamp = get_jiffies_64();
67 static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
69 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
75 static int ata_ering_map(struct ata_ering *ering,
76 int (*map_fn)(struct ata_ering_entry *, void *),
80 struct ata_ering_entry *ent;
84 ent = &ering->ring[idx];
87 rc = map_fn(ent, arg);
90 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
91 } while (idx != ering->cursor);
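/* A hypothetical callback for ata_ering_map(), shown purely to
 * illustrate the iteration contract (the name and purpose are
 * illustrative, not part of libata): entries are visited starting
 * from the most recent one, and iteration stops early as soon as the
 * callback returns non-zero.
 */
static int my_count_timeouts_cb(struct ata_ering_entry *ent, void *arg)
{
	int *nr_timeouts = arg;

	if (ent->err_mask & AC_ERR_TIMEOUT)
		(*nr_timeouts)++;
	return 0;	/* zero = keep walking the whole ring */
}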
96 static unsigned int ata_eh_dev_action(struct ata_device *dev)
98 struct ata_eh_context *ehc = &dev->ap->eh_context;
100 return ehc->i.action | ehc->i.dev_action[dev->devno];
103 static void ata_eh_clear_action(struct ata_device *dev,
104 struct ata_eh_info *ehi, unsigned int action)
109 ehi->action &= ~action;
110 for (i = 0; i < ATA_MAX_DEVICES; i++)
111 ehi->dev_action[i] &= ~action;
113 /* doesn't make sense for port-wide EH actions */
114 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
116 /* break ehi->action into ehi->dev_action */
117 if (ehi->action & action) {
118 for (i = 0; i < ATA_MAX_DEVICES; i++)
119 ehi->dev_action[i] |= ehi->action & action;
120 ehi->action &= ~action;
123 /* turn off the specified per-dev action */
124 ehi->dev_action[dev->devno] &= ~action;
129 * ata_scsi_timed_out - SCSI layer time out callback
130 * @cmd: timed out SCSI command
132 * Handles SCSI layer timeout. We race with normal completion of
133 * the qc for @cmd. If the qc is already gone, we lose and let
134 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
135 * timed out and EH should be invoked. Prevent ata_qc_complete()
136 * from finishing it by setting EH_SCHEDULED and return
137 * EH_NOT_HANDLED.
139 * TODO: kill this function once old EH is gone.
142 * Called from timer context
145 * EH_HANDLED or EH_NOT_HANDLED
147 enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
149 struct Scsi_Host *host = cmd->device->host;
150 struct ata_port *ap = ata_shost_to_port(host);
152 struct ata_queued_cmd *qc;
153 enum scsi_eh_timer_return ret;
157 if (ap->ops->error_handler) {
158 ret = EH_NOT_HANDLED;
163 spin_lock_irqsave(ap->lock, flags);
164 qc = ata_qc_from_tag(ap, ap->active_tag);
166 WARN_ON(qc->scsicmd != cmd);
167 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
168 qc->err_mask |= AC_ERR_TIMEOUT;
169 ret = EH_NOT_HANDLED;
171 spin_unlock_irqrestore(ap->lock, flags);
174 DPRINTK("EXIT, ret=%d\n", ret);
179 * ata_scsi_error - SCSI layer error handler callback
180 * @host: SCSI host on which error occurred
182 * Handles SCSI-layer-thrown error events.
185 * Inherited from SCSI layer (none, can sleep)
190 void ata_scsi_error(struct Scsi_Host *host)
192 struct ata_port *ap = ata_shost_to_port(host);
193 spinlock_t *ap_lock = ap->lock;
194 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
199 /* synchronize with port task */
200 ata_port_flush_task(ap);
202 /* synchronize with host_set lock and sort out timeouts */
204 /* For new EH, all qcs are finished in one of three ways -
205 * normal completion, error completion, and SCSI timeout.
206 * Both completions can race against SCSI timeout. When normal
207 * completion wins, the qc never reaches EH. When error
208 * completion wins, the qc has ATA_QCFLAG_FAILED set.
210 * When SCSI timeout wins, things are a bit more complex.
211 * Normal or error completion can occur after the timeout but
212 * before this point. In such cases, both types of
213 * completions are honored. A scmd is determined to have
214 * timed out iff its associated qc is active and not failed.
216 if (ap->ops->error_handler) {
217 struct scsi_cmnd *scmd, *tmp;
220 spin_lock_irqsave(ap_lock, flags);
222 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
223 struct ata_queued_cmd *qc;
225 for (i = 0; i < ATA_MAX_QUEUE; i++) {
226 qc = __ata_qc_from_tag(ap, i);
227 if (qc->flags & ATA_QCFLAG_ACTIVE &&
228 qc->scsicmd == scmd)
229 break;
230 }
232 if (i < ATA_MAX_QUEUE) {
233 /* the scmd has an associated qc */
234 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
235 /* which hasn't failed yet, timeout */
236 qc->err_mask |= AC_ERR_TIMEOUT;
237 qc->flags |= ATA_QCFLAG_FAILED;
241 /* Normal completion occurred after
242 * SCSI timeout but before this point.
243 * Successfully complete it.
245 scmd->retries = scmd->allowed;
246 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
250 /* If we have timed out qcs, they belong to EH from
251 * this point but the state of the controller is
252 * unknown. Freeze the port to make sure the IRQ
253 * handler doesn't diddle with those qcs. This must
254 * be done atomically w.r.t. setting QCFLAG_FAILED.
257 __ata_port_freeze(ap);
259 spin_unlock_irqrestore(ap_lock, flags);
261 spin_unlock_wait(ap_lock);
264 /* invoke error handler */
265 if (ap->ops->error_handler) {
266 /* fetch & clear EH info */
267 spin_lock_irqsave(ap_lock, flags);
269 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
270 ap->eh_context.i = ap->eh_info;
271 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
273 ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
274 ap->flags &= ~ATA_FLAG_EH_PENDING;
276 spin_unlock_irqrestore(ap_lock, flags);
278 /* invoke EH. if unloading, just finish failed qcs */
279 if (!(ap->flags & ATA_FLAG_UNLOADING))
280 ap->ops->error_handler(ap);
284 /* Exception might have happened after ->error_handler
285 * recovered the port but before this point. Repeat
286 * EH in such case.
288 spin_lock_irqsave(ap_lock, flags);
290 if (ap->flags & ATA_FLAG_EH_PENDING) {
292 ata_port_printk(ap, KERN_INFO,
293 "EH pending after completion, "
294 "repeating EH (cnt=%d)\n", repeat_cnt);
295 spin_unlock_irqrestore(ap_lock, flags);
298 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
299 "tries, giving up\n", ATA_EH_MAX_REPEAT);
302 /* this run is complete, make sure EH info is clear */
303 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
305 /* Clear host_eh_scheduled while holding ap_lock such
306 * that if exception occurs after this point but
307 * before EH completion, SCSI midlayer will
308 * re-initiate EH.
310 host->host_eh_scheduled = 0;
312 spin_unlock_irqrestore(ap_lock, flags);
314 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
315 ap->ops->eng_timeout(ap);
318 /* finish or retry handled scmd's and clean up */
319 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
321 scsi_eh_flush_done_q(&ap->eh_done_q);
324 spin_lock_irqsave(ap_lock, flags);
326 if (ap->flags & ATA_FLAG_LOADING) {
327 ap->flags &= ~ATA_FLAG_LOADING;
329 if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
330 queue_work(ata_aux_wq, &ap->hotplug_task);
331 if (ap->flags & ATA_FLAG_RECOVERED)
332 ata_port_printk(ap, KERN_INFO, "EH complete\n");
335 ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
337 /* tell wait_eh that we're done */
338 ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
339 wake_up_all(&ap->eh_wait_q);
341 spin_unlock_irqrestore(ap_lock, flags);
347 * ata_port_wait_eh - Wait for the currently pending EH to complete
348 * @ap: Port to wait EH for
350 * Wait until the currently pending EH is complete.
353 * Kernel thread context (may sleep).
355 void ata_port_wait_eh(struct ata_port *ap)
361 spin_lock_irqsave(ap->lock, flags);
363 while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
364 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
365 spin_unlock_irqrestore(ap->lock, flags);
367 spin_lock_irqsave(ap->lock, flags);
369 finish_wait(&ap->eh_wait_q, &wait);
371 spin_unlock_irqrestore(ap->lock, flags);
373 /* make sure SCSI EH is complete */
374 if (scsi_host_in_recovery(ap->host)) {
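/* Minimal usage sketch (hypothetical caller, not part of this file):
 * any thread that must not proceed while EH is scheduled or running,
 * e.g. a teardown path, can quiesce EH like this.  May sleep.
 */
static void my_quiesce_port(struct ata_port *ap)
{
	ata_port_wait_eh(ap);
	/* at the moment the wait finished, both EH_PENDING and
	 * EH_IN_PROGRESS were clear and SCSI EH had completed
	 */
}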
381 * ata_qc_timeout - Handle timeout of queued command
382 * @qc: Command that timed out
384 * Some part of the kernel (currently, only the SCSI layer)
385 * has noticed that the active command on port @ap has not
386 * completed after a specified length of time. Handle this
387 * condition by disabling DMA (if necessary) and completing
388 * transactions, with error if necessary.
390 * This also handles the case of the "lost interrupt", where
391 * for some reason (possibly hardware bug, possibly driver bug)
392 * an interrupt was not delivered to the driver, even though the
393 * transaction completed successfully.
395 * TODO: kill this function once old EH is gone.
398 * Inherited from SCSI layer (none, can sleep)
400 static void ata_qc_timeout(struct ata_queued_cmd *qc)
402 struct ata_port *ap = qc->ap;
403 u8 host_stat = 0, drv_stat;
408 ap->hsm_task_state = HSM_ST_IDLE;
410 spin_lock_irqsave(ap->lock, flags);
412 switch (qc->tf.protocol) {
415 case ATA_PROT_ATAPI_DMA:
416 host_stat = ap->ops->bmdma_status(ap);
418 /* before we do anything else, clear DMA-Start bit */
419 ap->ops->bmdma_stop(qc);
425 drv_stat = ata_chk_status(ap);
427 /* ack bmdma irq events */
428 ap->ops->irq_clear(ap);
430 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
431 "stat 0x%x host_stat 0x%x\n",
432 qc->tf.command, drv_stat, host_stat);
434 /* complete taskfile transaction */
435 qc->err_mask |= AC_ERR_TIMEOUT;
439 spin_unlock_irqrestore(ap->lock, flags);
441 ata_eh_qc_complete(qc);
447 * ata_eng_timeout - Handle timeout of queued command
448 * @ap: Port on which timed-out command is active
450 * Some part of the kernel (currently, only the SCSI layer)
451 * has noticed that the active command on port @ap has not
452 * completed after a specified length of time. Handle this
453 * condition by disabling DMA (if necessary) and completing
454 * transactions, with error if necessary.
456 * This also handles the case of the "lost interrupt", where
457 * for some reason (possibly hardware bug, possibly driver bug)
458 * an interrupt was not delivered to the driver, even though the
459 * transaction completed successfully.
461 * TODO: kill this function once old EH is gone.
464 * Inherited from SCSI layer (none, can sleep)
466 void ata_eng_timeout(struct ata_port *ap)
470 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
476 * ata_qc_schedule_eh - schedule qc for error handling
477 * @qc: command to schedule error handling for
479 * Schedule error handling for @qc. EH will kick in as soon as
480 * other commands are drained.
483 * spin_lock_irqsave(host_set lock)
485 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
487 struct ata_port *ap = qc->ap;
489 WARN_ON(!ap->ops->error_handler);
491 qc->flags |= ATA_QCFLAG_FAILED;
492 qc->ap->flags |= ATA_FLAG_EH_PENDING;
494 /* The following will fail if timeout has already expired.
495 * ata_scsi_error() takes care of such scmds on EH entry.
496 * Note that ATA_QCFLAG_FAILED is unconditionally set after
497 * this function completes.
499 scsi_req_abort_cmd(qc->scsicmd);
503 * ata_port_schedule_eh - schedule error handling without a qc
504 * @ap: ATA port to schedule EH for
506 * Schedule error handling for @ap. EH will kick in as soon as
507 * all commands are drained.
510 * spin_lock_irqsave(host_set lock)
512 void ata_port_schedule_eh(struct ata_port *ap)
514 WARN_ON(!ap->ops->error_handler);
516 ap->flags |= ATA_FLAG_EH_PENDING;
517 scsi_schedule_eh(ap->host);
519 DPRINTK("port EH scheduled\n");
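/* Illustrative sketch of how an LLDD interrupt handler might request
 * port-wide EH; must be called while holding the host_set lock, per
 * the locking rule above.  The helper name is hypothetical and the
 * chosen err_mask/action values are just examples.
 */
static void my_handle_exception(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->eh_info;

	ehi->err_mask |= AC_ERR_OTHER;		/* record what we know */
	ehi->action |= ATA_EH_SOFTRESET;	/* ask EH to reset */
	ata_port_schedule_eh(ap);
}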
523 * ata_port_abort - abort all qc's on the port
524 * @ap: ATA port to abort qc's for
526 * Abort all active qc's of @ap and schedule EH.
529 * spin_lock_irqsave(host_set lock)
532 * Number of aborted qc's.
534 int ata_port_abort(struct ata_port *ap)
536 int tag, nr_aborted = 0;
538 WARN_ON(!ap->ops->error_handler);
540 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
541 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
544 qc->flags |= ATA_QCFLAG_FAILED;
551 ata_port_schedule_eh(ap);
557 * __ata_port_freeze - freeze port
558 * @ap: ATA port to freeze
560 * This function is called when HSM violation or some other
561 * condition disrupts normal operation of the port. Frozen port
562 * is not allowed to perform any operation until the port is
563 * thawed, which usually follows a successful reset.
565 * ap->ops->freeze() callback can be used for freezing the port
566 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
567 * port cannot be frozen hardware-wise, the interrupt handler
568 * must ack and clear interrupts unconditionally while the port
569 * is frozen.
572 * spin_lock_irqsave(host_set lock)
574 static void __ata_port_freeze(struct ata_port *ap)
576 WARN_ON(!ap->ops->error_handler);
581 ap->flags |= ATA_FLAG_FROZEN;
583 DPRINTK("ata%u port frozen\n", ap->id);
587 * ata_port_freeze - abort & freeze port
588 * @ap: ATA port to freeze
590 * Abort and freeze @ap.
593 * spin_lock_irqsave(host_set lock)
596 * Number of aborted commands.
598 int ata_port_freeze(struct ata_port *ap)
602 WARN_ON(!ap->ops->error_handler);
604 nr_aborted = ata_port_abort(ap);
605 __ata_port_freeze(ap);
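/* Illustrative sketch (hypothetical helper): on a fatal condition,
 * e.g. a host-side protocol error, an LLDD typically records what it
 * knows in eh_info and then aborts and freezes in one call.  Called
 * under the host_set lock.
 */
static void my_handle_fatal_error(struct ata_port *ap)
{
	ap->eh_info.err_mask |= AC_ERR_HSM;
	ap->eh_info.action |= ATA_EH_SOFTRESET;
	ata_port_freeze(ap);	/* aborts all active qcs, then freezes */
}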
611 * ata_eh_freeze_port - EH helper to freeze port
612 * @ap: ATA port to freeze
619 void ata_eh_freeze_port(struct ata_port *ap)
623 if (!ap->ops->error_handler)
626 spin_lock_irqsave(ap->lock, flags);
627 __ata_port_freeze(ap);
628 spin_unlock_irqrestore(ap->lock, flags);
632 * ata_eh_thaw_port - EH helper to thaw port
633 * @ap: ATA port to thaw
635 * Thaw frozen port @ap.
640 void ata_eh_thaw_port(struct ata_port *ap)
644 if (!ap->ops->error_handler)
647 spin_lock_irqsave(ap->lock, flags);
649 ap->flags &= ~ATA_FLAG_FROZEN;
654 spin_unlock_irqrestore(ap->lock, flags);
656 DPRINTK("ata%u port thawed\n", ap->id);
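/* Sketch of matching ->freeze/->thaw port operations an LLDD might
 * provide (register names are made up; real drivers mask or unmask
 * their hardware's per-port interrupt sources here).
 */
static void my_port_freeze(struct ata_port *ap)
{
	/* e.g. mask port interrupts so a frozen port stays silent:
	 * writel(0, my_port_mmio(ap) + MY_PORT_IRQ_MASK);
	 */
}

static void my_port_thaw(struct ata_port *ap)
{
	/* e.g. clear stale IRQ status, then unmask port interrupts */
}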
659 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
664 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
666 struct ata_port *ap = qc->ap;
667 struct scsi_cmnd *scmd = qc->scsicmd;
670 spin_lock_irqsave(ap->lock, flags);
671 qc->scsidone = ata_eh_scsidone;
672 __ata_qc_complete(qc);
673 WARN_ON(ata_tag_valid(qc->tag));
674 spin_unlock_irqrestore(ap->lock, flags);
676 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
680 * ata_eh_qc_complete - Complete an active ATA command from EH
681 * @qc: Command to complete
683 * Indicate to the mid and upper layers that an ATA command has
684 * completed. To be used from EH.
686 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
688 struct scsi_cmnd *scmd = qc->scsicmd;
689 scmd->retries = scmd->allowed;
690 __ata_eh_qc_complete(qc);
694 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
695 * @qc: Command to retry
697 * Indicate to the mid and upper layers that an ATA command
698 * should be retried. To be used from EH.
700 * SCSI midlayer limits the number of retries to scmd->allowed.
701 * scmd->retries is decremented for commands which get retried
702 * due to unrelated failures (qc->err_mask is zero).
704 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
706 struct scsi_cmnd *scmd = qc->scsicmd;
707 if (!qc->err_mask && scmd->retries)
708 scmd->retries--;
709 __ata_eh_qc_complete(qc);
713 * ata_eh_detach_dev - detach ATA device
714 * @dev: ATA device to detach
721 static void ata_eh_detach_dev(struct ata_device *dev)
723 struct ata_port *ap = dev->ap;
726 ata_dev_disable(dev);
728 spin_lock_irqsave(ap->lock, flags);
730 dev->flags &= ~ATA_DFLAG_DETACH;
732 if (ata_scsi_offline_dev(dev)) {
733 dev->flags |= ATA_DFLAG_DETACHED;
734 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
737 /* clear per-dev EH actions */
738 ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
739 ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
741 spin_unlock_irqrestore(ap->lock, flags);
745 * ata_eh_about_to_do - about to perform eh_action
746 * @ap: target ATA port
747 * @dev: target ATA dev for per-dev action (can be NULL)
748 * @action: action about to be performed
750 * Called just before performing EH actions to clear related bits
751 * in @ap->eh_info such that eh actions are not unnecessarily
752 * repeated.
757 static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
762 spin_lock_irqsave(ap->lock, flags);
763 ata_eh_clear_action(dev, &ap->eh_info, action);
764 ap->flags |= ATA_FLAG_RECOVERED;
765 spin_unlock_irqrestore(ap->lock, flags);
769 * ata_eh_done - EH action complete
770 * @ap: target ATA port
771 * @dev: target ATA dev for per-dev action (can be NULL)
772 * @action: action just completed
774 * Called right after performing EH actions to clear related bits
775 * in @ap->eh_context.
780 static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
783 ata_eh_clear_action(dev, &ap->eh_context.i, action);
787 * ata_err_string - convert err_mask to descriptive string
788 * @err_mask: error mask to convert to string
790 * Convert @err_mask to descriptive string. Errors are
791 * prioritized according to severity and only the most severe
792 * error is reported.
798 * Descriptive string for @err_mask
800 static const char * ata_err_string(unsigned int err_mask)
802 if (err_mask & AC_ERR_HOST_BUS)
803 return "host bus error";
804 if (err_mask & AC_ERR_ATA_BUS)
805 return "ATA bus error";
806 if (err_mask & AC_ERR_TIMEOUT)
807 return "timeout";
808 if (err_mask & AC_ERR_HSM)
809 return "HSM violation";
810 if (err_mask & AC_ERR_SYSTEM)
811 return "internal error";
812 if (err_mask & AC_ERR_MEDIA)
813 return "media error";
814 if (err_mask & AC_ERR_INVALID)
815 return "invalid argument";
816 if (err_mask & AC_ERR_DEV)
817 return "device error";
818 return "unknown error";
822 * ata_read_log_page - read a specific log page
823 * @dev: target device
824 * @page: page to read
825 * @buf: buffer to store read page
826 * @sectors: number of sectors to read
828 * Read log page using READ_LOG_EXT command.
831 * Kernel thread context (may sleep).
834 * 0 on success, AC_ERR_* mask otherwise.
836 static unsigned int ata_read_log_page(struct ata_device *dev,
837 u8 page, void *buf, unsigned int sectors)
839 struct ata_taskfile tf;
840 unsigned int err_mask;
842 DPRINTK("read log page - page %d\n", page);
844 ata_tf_init(dev, &tf);
845 tf.command = ATA_CMD_READ_LOG_EXT;
848 tf.hob_nsect = sectors >> 8;
849 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
850 tf.protocol = ATA_PROT_PIO;
852 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
853 buf, sectors * ATA_SECT_SIZE);
855 DPRINTK("EXIT, err_mask=%x\n", err_mask);
860 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
861 * @dev: Device to read log page 10h from
862 * @tag: Resulting tag of the failed command
863 * @tf: Resulting taskfile registers of the failed command
865 * Read log page 10h to obtain NCQ error details and clear error
866 * condition.
869 * Kernel thread context (may sleep).
872 * 0 on success, -errno otherwise.
874 static int ata_eh_read_log_10h(struct ata_device *dev,
875 int *tag, struct ata_taskfile *tf)
877 u8 *buf = dev->ap->sector_buf;
878 unsigned int err_mask;
882 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
887 for (i = 0; i < ATA_SECT_SIZE; i++)
890 ata_dev_printk(dev, KERN_WARNING,
891 "invalid checksum 0x%x on log page 10h\n", csum);
896 *tag = buf[0] & 0x1f;
898 tf->command = buf[2];
899 tf->feature = buf[3];
904 tf->hob_lbal = buf[8];
905 tf->hob_lbam = buf[9];
906 tf->hob_lbah = buf[10];
908 tf->hob_nsect = buf[13];
914 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
915 * @dev: device to perform REQUEST_SENSE to
916 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
918 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
919 * SENSE. This function is an EH helper.
922 * Kernel thread context (may sleep).
925 * 0 on success, AC_ERR_* mask on failure
927 static unsigned int atapi_eh_request_sense(struct ata_device *dev,
928 unsigned char *sense_buf)
930 struct ata_port *ap = dev->ap;
931 struct ata_taskfile tf;
932 u8 cdb[ATAPI_CDB_LEN];
934 DPRINTK("ATAPI request sense\n");
936 ata_tf_init(dev, &tf);
938 /* FIXME: is this needed? */
939 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
941 /* XXX: why tf_read here? */
942 ap->ops->tf_read(ap, &tf);
944 /* fill these in, for the case where they are -not- overwritten */
946 sense_buf[2] = tf.feature >> 4;
948 memset(cdb, 0, ATAPI_CDB_LEN);
949 cdb[0] = REQUEST_SENSE;
950 cdb[4] = SCSI_SENSE_BUFFERSIZE;
952 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
953 tf.command = ATA_CMD_PACKET;
955 /* is it pointless to prefer PIO for "safety reasons"? */
956 if (ap->flags & ATA_FLAG_PIO_DMA) {
957 tf.protocol = ATA_PROT_ATAPI_DMA;
958 tf.feature |= ATAPI_PKT_DMA;
960 tf.protocol = ATA_PROT_ATAPI;
961 tf.lbam = (8 * 1024) & 0xff;
962 tf.lbah = (8 * 1024) >> 8;
965 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
966 sense_buf, SCSI_SENSE_BUFFERSIZE);
970 * ata_eh_analyze_serror - analyze SError for a failed port
971 * @ap: ATA port to analyze SError for
973 * Analyze SError if available and further determine cause of
974 * failure.
979 static void ata_eh_analyze_serror(struct ata_port *ap)
981 struct ata_eh_context *ehc = &ap->eh_context;
982 u32 serror = ehc->i.serror;
983 unsigned int err_mask = 0, action = 0;
985 if (serror & SERR_PERSISTENT) {
986 err_mask |= AC_ERR_ATA_BUS;
987 action |= ATA_EH_HARDRESET;
988 }
989 if (serror &
990 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
991 err_mask |= AC_ERR_ATA_BUS;
992 action |= ATA_EH_SOFTRESET;
994 if (serror & SERR_PROTOCOL) {
995 err_mask |= AC_ERR_HSM;
996 action |= ATA_EH_SOFTRESET;
998 if (serror & SERR_INTERNAL) {
999 err_mask |= AC_ERR_SYSTEM;
1000 action |= ATA_EH_SOFTRESET;
1002 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
1003 ata_ehi_hotplugged(&ehc->i);
1005 ehc->i.err_mask |= err_mask;
1006 ehc->i.action |= action;
1010 * ata_eh_analyze_ncq_error - analyze NCQ error
1011 * @ap: ATA port to analyze NCQ error for
1013 * Read log page 10h, determine the offending qc and acquire
1014 * error status TF. For NCQ device errors, all an LLDD has to
1015 * do is set AC_ERR_DEV in ehi->err_mask. This function takes
1016 * care of the rest.
1019 * Kernel thread context (may sleep).
1021 static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1023 struct ata_eh_context *ehc = &ap->eh_context;
1024 struct ata_device *dev = ap->device;
1025 struct ata_queued_cmd *qc;
1026 struct ata_taskfile tf;
1029 /* if frozen, we can't do much */
1030 if (ap->flags & ATA_FLAG_FROZEN)
1033 /* is it an NCQ device error? */
1034 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1037 /* has LLDD analyzed already? */
1038 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1039 qc = __ata_qc_from_tag(ap, tag);
1041 if (!(qc->flags & ATA_QCFLAG_FAILED))
1048 /* okay, this error is ours */
1049 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1051 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1052 "(errno=%d)\n", rc);
1056 if (!(ap->sactive & (1 << tag))) {
1057 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1058 "inactive tag %d\n", tag);
1062 /* we've got the perpetrator, condemn it */
1063 qc = __ata_qc_from_tag(ap, tag);
1064 memcpy(&qc->result_tf, &tf, sizeof(tf));
1065 qc->err_mask |= AC_ERR_DEV;
1066 ehc->i.err_mask &= ~AC_ERR_DEV;
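/* Illustrative: for the analysis above to run, an NCQ-aware LLDD
 * should record the device error and kick EH without freezing the
 * port (a frozen port is skipped above because the log page can no
 * longer be read).  Something along these lines, under host_set lock:
 *
 *	ap->eh_info.err_mask |= AC_ERR_DEV;
 *	ata_port_abort(ap);
 */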
1070 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1071 * @qc: qc to analyze
1072 * @tf: Taskfile registers to analyze
1074 * Analyze taskfile of @qc and further determine cause of
1075 * failure. This function also requests ATAPI sense data if
1076 * available.
1079 * Kernel thread context (may sleep).
1082 * Determined recovery action
1084 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1085 const struct ata_taskfile *tf)
1087 unsigned int tmp, action = 0;
1088 u8 stat = tf->command, err = tf->feature;
1090 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1091 qc->err_mask |= AC_ERR_HSM;
1092 return ATA_EH_SOFTRESET;
1095 if (!(qc->err_mask & AC_ERR_DEV))
1098 switch (qc->dev->class) {
1101 qc->err_mask |= AC_ERR_ATA_BUS;
1103 qc->err_mask |= AC_ERR_MEDIA;
1105 qc->err_mask |= AC_ERR_INVALID;
1109 tmp = atapi_eh_request_sense(qc->dev,
1110 qc->scsicmd->sense_buffer);
1112 /* ATA_QCFLAG_SENSE_VALID is used to tell
1113 * atapi_qc_complete() that sense data is
1116 * TODO: interpret sense data and set
1117 * appropriate err_mask.
1119 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1121 qc->err_mask |= tmp;
1124 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1125 action |= ATA_EH_SOFTRESET;
1130 static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1132 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1136 if (ent->err_mask & AC_ERR_HSM)
1138 if ((ent->err_mask &
1139 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1146 struct speed_down_needed_arg {
1151 static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1153 struct speed_down_needed_arg *arg = void_arg;
1155 if (ent->timestamp < arg->since)
1158 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1163 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1164 * @dev: Device of interest
1166 * This function examines error ring of @dev and determines
1167 * whether speed down is necessary. Speed down is necessary if
1168 * there have been more than 3 Cat-1 errors or 10 Cat-2
1169 * errors during the last 15 minutes.
1171 * Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
1172 * violation for known supported commands.
1174 * Cat-2 errors are unclassified DEV errors for known supported
1175 * commands.
1178 * Inherited from caller.
1181 * 1 if speed down is necessary, 0 otherwise
1183 static int ata_eh_speed_down_needed(struct ata_device *dev)
1185 const u64 interval = 15LLU * 60 * HZ;
1186 static const int err_limits[3] = { -1, 3, 10 };
1187 struct speed_down_needed_arg arg;
1188 struct ata_ering_entry *ent;
1192 ent = ata_ering_top(&dev->ering);
1196 err_cat = ata_eh_categorize_ering_entry(ent);
1200 memset(&arg, 0, sizeof(arg));
1202 j64 = get_jiffies_64();
1203 if (j64 >= interval)
1204 arg.since = j64 - interval;
1208 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1210 return arg.nr_errors[err_cat] > err_limits[err_cat];
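/* Worked example of the thresholds above, assuming err_limits stays
 * { -1, 3, 10 }: if the most recent error is Cat-1 and the ring holds
 * four Cat-1 entries newer than "since" (15 minutes ago), then
 * nr_errors[1] == 4 > 3 and speed down is requested; three or fewer
 * would not trigger it.  Cat-2 errors use the higher limit of 10.
 */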
1214 * ata_eh_speed_down - record error and speed down if necessary
1215 * @dev: Failed device
1216 * @is_io: Did the device fail during normal IO?
1217 * @err_mask: err_mask of the error
1219 * Record error and examine error history to determine whether
1220 * adjusting transmission speed is necessary. It also sets
1221 * transmission limits appropriately if such adjustment is
1222 * necessary.
1225 * Kernel thread context (may sleep).
1228 * 0 on success, -errno otherwise
1230 static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1231 unsigned int err_mask)
1236 /* record error and determine whether speed down is necessary */
1237 ata_ering_record(&dev->ering, is_io, err_mask);
1239 if (!ata_eh_speed_down_needed(dev))
1242 /* lower the SATA link speed if possible */
1243 if (sata_down_spd_limit(dev->ap) == 0)
1244 return ATA_EH_HARDRESET;
1246 /* lower transfer mode */
1247 if (ata_down_xfermask_limit(dev, 0) == 0)
1248 return ATA_EH_SOFTRESET;
1250 ata_dev_printk(dev, KERN_ERR,
1251 "speed down requested but no transfer mode left\n");
1256 * ata_eh_autopsy - analyze error and determine recovery action
1257 * @ap: ATA port to perform autopsy on
1259 * Analyze why @ap failed and determine which recovery action is
1260 * needed. This function also sets more detailed AC_ERR_* values
1261 * and fills sense data for ATAPI CHECK SENSE.
1264 * Kernel thread context (may sleep).
1266 static void ata_eh_autopsy(struct ata_port *ap)
1268 struct ata_eh_context *ehc = &ap->eh_context;
1269 unsigned int action = ehc->i.action;
1270 struct ata_device *failed_dev = NULL;
1271 unsigned int all_err_mask = 0;
1278 /* obtain and analyze SError */
1279 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1281 ehc->i.serror |= serror;
1282 ata_eh_analyze_serror(ap);
1283 } else if (rc != -EOPNOTSUPP)
1284 action |= ATA_EH_HARDRESET;
1286 /* analyze NCQ failure */
1287 ata_eh_analyze_ncq_error(ap);
1289 /* any real error trumps AC_ERR_OTHER */
1290 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1291 ehc->i.err_mask &= ~AC_ERR_OTHER;
1293 all_err_mask |= ehc->i.err_mask;
1295 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1296 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1298 if (!(qc->flags & ATA_QCFLAG_FAILED))
1301 /* inherit upper level err_mask */
1302 qc->err_mask |= ehc->i.err_mask;
1305 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1307 /* DEV errors are probably spurious in case of ATA_BUS error */
1308 if (qc->err_mask & AC_ERR_ATA_BUS)
1309 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1312 /* any real error trumps unknown error */
1313 if (qc->err_mask & ~AC_ERR_OTHER)
1314 qc->err_mask &= ~AC_ERR_OTHER;
1316 /* SENSE_VALID trumps dev/unknown error and revalidation */
1317 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1318 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1319 action &= ~ATA_EH_REVALIDATE;
1322 /* accumulate error info */
1323 failed_dev = qc->dev;
1324 all_err_mask |= qc->err_mask;
1325 if (qc->flags & ATA_QCFLAG_IO)
1329 /* enforce default EH actions */
1330 if (ap->flags & ATA_FLAG_FROZEN ||
1331 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1332 action |= ATA_EH_SOFTRESET;
1333 else if (all_err_mask)
1334 action |= ATA_EH_REVALIDATE;
1336 /* if we have offending qcs and the associated failed device */
1339 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1341 /* perform per-dev EH action only on the offending device */
1342 ehc->i.dev_action[failed_dev->devno] |=
1343 action & ATA_EH_PERDEV_MASK;
1344 action &= ~ATA_EH_PERDEV_MASK;
1347 /* record autopsy result */
1348 ehc->i.dev = failed_dev;
1349 ehc->i.action = action;
1355 * ata_eh_report - report error handling to user
1356 * @ap: ATA port EH is going on
1358 * Report EH to user.
1363 static void ata_eh_report(struct ata_port *ap)
1365 struct ata_eh_context *ehc = &ap->eh_context;
1366 const char *frozen, *desc;
1367 int tag, nr_failed = 0;
1370 if (ehc->i.desc[0] != '\0')
1373 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1374 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1376 if (!(qc->flags & ATA_QCFLAG_FAILED))
1378 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1384 if (!nr_failed && !ehc->i.err_mask)
1388 if (ap->flags & ATA_FLAG_FROZEN)
1392 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1393 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1394 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1395 ehc->i.action, frozen);
1397 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1399 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1400 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1401 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1402 ehc->i.action, frozen);
1404 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1407 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1408 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1410 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1413 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1414 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1415 qc->tag, qc->tf.command, qc->err_mask,
1416 qc->result_tf.command, qc->result_tf.feature,
1417 ata_err_string(qc->err_mask));
1421 static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1422 unsigned int *classes)
1426 for (i = 0; i < ATA_MAX_DEVICES; i++)
1427 classes[i] = ATA_DEV_UNKNOWN;
1429 rc = reset(ap, classes);
1433 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
1434 * complete and convert all ATA_DEV_UNKNOWN to
1435 * ATA_DEV_NONE.
1437 for (i = 0; i < ATA_MAX_DEVICES; i++)
1438 if (classes[i] != ATA_DEV_UNKNOWN)
1441 if (i < ATA_MAX_DEVICES)
1442 for (i = 0; i < ATA_MAX_DEVICES; i++)
1443 if (classes[i] == ATA_DEV_UNKNOWN)
1444 classes[i] = ATA_DEV_NONE;
1449 static int ata_eh_followup_srst_needed(int rc, int classify,
1450 const unsigned int *classes)
1456 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1461 static int ata_eh_reset(struct ata_port *ap, int classify,
1462 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1463 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1465 struct ata_eh_context *ehc = &ap->eh_context;
1466 unsigned int *classes = ehc->classes;
1467 int tries = ATA_EH_RESET_TRIES;
1468 int verbose = !(ap->flags & ATA_FLAG_LOADING);
1469 unsigned int action;
1470 ata_reset_fn_t reset;
1471 int i, did_followup_srst, rc;
1473 /* Determine which reset to use and record in ehc->i.action.
1474 * prereset() may examine and modify it.
1476 action = ehc->i.action;
1477 ehc->i.action &= ~ATA_EH_RESET_MASK;
1478 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1479 !(action & ATA_EH_HARDRESET))))
1480 ehc->i.action |= ATA_EH_SOFTRESET;
1482 ehc->i.action |= ATA_EH_HARDRESET;
1487 ata_port_printk(ap, KERN_ERR,
1488 "prereset failed (errno=%d)\n", rc);
1493 /* prereset() might have modified ehc->i.action */
1494 if (ehc->i.action & ATA_EH_HARDRESET)
1496 else if (ehc->i.action & ATA_EH_SOFTRESET)
1499 /* prereset told us not to reset, bang classes and return */
1500 for (i = 0; i < ATA_MAX_DEVICES; i++)
1501 classes[i] = ATA_DEV_NONE;
1505 /* did prereset() screw up? if so, fix up to avoid oopsing */
1507 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1508 "invalid reset type\n");
1516 /* shut up during boot probing */
1518 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1519 reset == softreset ? "soft" : "hard");
1522 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1523 ehc->i.flags |= ATA_EHI_DID_RESET;
1525 rc = ata_do_reset(ap, reset, classes);
1527 did_followup_srst = 0;
1528 if (reset == hardreset &&
1529 ata_eh_followup_srst_needed(rc, classify, classes)) {
1530 /* okay, let's do follow-up softreset */
1531 did_followup_srst = 1;
1535 ata_port_printk(ap, KERN_ERR,
1536 "follow-up softreset required "
1537 "but no softreset avaliable\n");
1541 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1542 rc = ata_do_reset(ap, reset, classes);
1544 if (rc == 0 && classify &&
1545 classes[0] == ATA_DEV_UNKNOWN) {
1546 ata_port_printk(ap, KERN_ERR,
1547 "classification failed\n");
1552 if (rc && --tries) {
1555 if (reset == softreset) {
1556 if (did_followup_srst)
1557 type = "follow-up soft";
1563 ata_port_printk(ap, KERN_WARNING,
1564 "%sreset failed, retrying in 5 secs\n", type);
1567 if (reset == hardreset)
1568 sata_down_spd_limit(ap);
1575 /* After the reset, the device state is PIO 0 and the
1576 * controller state is undefined. Record the mode.
1578 for (i = 0; i < ATA_MAX_DEVICES; i++)
1579 ap->device[i].pio_mode = XFER_PIO_0;
1582 postreset(ap, classes);
1584 /* reset successful, schedule revalidation */
1585 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
1586 ehc->i.action |= ATA_EH_REVALIDATE;
1592 static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1593 struct ata_device **r_failed_dev)
1595 struct ata_eh_context *ehc = &ap->eh_context;
1596 struct ata_device *dev;
1597 unsigned long flags;
1602 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1603 unsigned int action;
1605 dev = &ap->device[i];
1606 action = ata_eh_dev_action(dev);
1608 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
1609 if (ata_port_offline(ap)) {
1614 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1615 rc = ata_dev_revalidate(dev,
1616 ehc->i.flags & ATA_EHI_DID_RESET);
1620 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1622 /* schedule the scsi_rescan_device() here */
1623 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1624 } else if (dev->class == ATA_DEV_UNKNOWN &&
1625 ehc->tries[dev->devno] &&
1626 ata_class_enabled(ehc->classes[dev->devno])) {
1627 dev->class = ehc->classes[dev->devno];
1629 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1631 rc = ata_dev_configure(dev, 1);
1634 dev->class = ATA_DEV_UNKNOWN;
1638 spin_lock_irqsave(ap->lock, flags);
1639 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
1640 spin_unlock_irqrestore(ap->lock, flags);
1645 *r_failed_dev = dev;
1651 static int ata_port_nr_enabled(struct ata_port *ap)
1655 for (i = 0; i < ATA_MAX_DEVICES; i++)
1656 if (ata_dev_enabled(&ap->device[i]))
1661 static int ata_port_nr_vacant(struct ata_port *ap)
1665 for (i = 0; i < ATA_MAX_DEVICES; i++)
1666 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1671 static int ata_eh_skip_recovery(struct ata_port *ap)
1673 struct ata_eh_context *ehc = &ap->eh_context;
1676 if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
1679 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1680 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1681 struct ata_device *dev = &ap->device[i];
1683 if (dev->class == ATA_DEV_UNKNOWN &&
1684 ehc->classes[dev->devno] != ATA_DEV_NONE)
1692 * ata_eh_recover - recover host port after error
1693 * @ap: host port to recover
1694 * @prereset: prereset method (can be NULL)
1695 * @softreset: softreset method (can be NULL)
1696 * @hardreset: hardreset method (can be NULL)
1697 * @postreset: postreset method (can be NULL)
1699 * This is the alpha and omega, yin and yang, heart and soul of
1700 * libata exception handling. On entry, actions required to
1701 * recover the port and hotplug requests are recorded in
1702 * eh_context. This function executes all the operations with
1703 * appropriate retrials and fallbacks to resurrect failed
1704 * devices, detach goners and greet newcomers.
1707 * Kernel thread context (may sleep).
1710 * 0 on success, -errno on failure.
1712 static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1713 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1714 ata_postreset_fn_t postreset)
1716 struct ata_eh_context *ehc = &ap->eh_context;
1717 struct ata_device *dev;
1718 int down_xfermask, i, rc;
1722 /* prep for recovery */
1723 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1724 dev = &ap->device[i];
1726 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1728 /* process hotplug request */
1729 if (dev->flags & ATA_DFLAG_DETACH)
1730 ata_eh_detach_dev(dev);
1732 if (!ata_dev_enabled(dev) &&
1733 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1734 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1735 ata_eh_detach_dev(dev);
1737 ehc->did_probe_mask |= (1 << dev->devno);
1738 ehc->i.action |= ATA_EH_SOFTRESET;
1746 /* if UNLOADING, finish immediately */
1747 if (ap->flags & ATA_FLAG_UNLOADING)
1750 /* skip EH if possible. */
1751 if (ata_eh_skip_recovery(ap))
1754 for (i = 0; i < ATA_MAX_DEVICES; i++)
1755 ehc->classes[i] = ATA_DEV_UNKNOWN;
1758 if (ehc->i.action & ATA_EH_RESET_MASK) {
1759 ata_eh_freeze_port(ap);
1761 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1762 softreset, hardreset, postreset);
1764 ata_port_printk(ap, KERN_ERR,
1765 "reset failed, giving up\n");
1769 ata_eh_thaw_port(ap);
1772 /* revalidate existing devices and attach new ones */
1773 rc = ata_eh_revalidate_and_attach(ap, &dev);
1777 /* configure transfer mode if the port has been reset */
1778 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1779 rc = ata_set_mode(ap, &dev);
1791 /* device missing, schedule probing */
1792 ehc->i.probe_mask |= (1 << dev->devno);
1794 ehc->tries[dev->devno] = 0;
1797 sata_down_spd_limit(ap);
1799 ehc->tries[dev->devno]--;
1800 if (down_xfermask &&
1801 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1802 ehc->tries[dev->devno] = 0;
1805 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
1806 /* disable device if it has used up all its chances */
1807 ata_dev_disable(dev);
1809 /* detach if offline */
1810 if (ata_port_offline(ap))
1811 ata_eh_detach_dev(dev);
1813 /* probe if requested */
1814 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
1815 !(ehc->did_probe_mask & (1 << dev->devno))) {
1816 ata_eh_detach_dev(dev);
1819 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1820 ehc->did_probe_mask |= (1 << dev->devno);
1821 ehc->i.action |= ATA_EH_SOFTRESET;
1824 /* soft didn't work? be haaaaard */
1825 if (ehc->i.flags & ATA_EHI_DID_RESET)
1826 ehc->i.action |= ATA_EH_HARDRESET;
1828 ehc->i.action |= ATA_EH_SOFTRESET;
1831 if (ata_port_nr_enabled(ap)) {
1832 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1833 "devices, retrying in 5 secs\n");
1836 /* no device left, repeat fast */
1844 for (i = 0; i < ATA_MAX_DEVICES; i++)
1845 ata_dev_disable(&ap->device[i]);
1848 DPRINTK("EXIT, rc=%d\n", rc);
1853 * ata_eh_finish - finish up EH
1854 * @ap: host port to finish EH for
1856 * Recovery is complete. Clean up EH states and retry or finish
1857 * failed qcs.
1862 static void ata_eh_finish(struct ata_port *ap)
1866 /* retry or finish qcs */
1867 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1868 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1870 if (!(qc->flags & ATA_QCFLAG_FAILED))
1874 /* FIXME: Once EH migration is complete,
1875 * generate sense data in this function,
1876 * considering both err_mask and tf.
1878 if (qc->err_mask & AC_ERR_INVALID)
1879 ata_eh_qc_complete(qc);
1881 ata_eh_qc_retry(qc);
1883 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1884 ata_eh_qc_complete(qc);
1886 /* feed zero TF to sense generation */
1887 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1888 ata_eh_qc_retry(qc);
1895 * ata_do_eh - do standard error handling
1896 * @ap: host port to handle error for
1897 * @prereset: prereset method (can be NULL)
1898 * @softreset: softreset method (can be NULL)
1899 * @hardreset: hardreset method (can be NULL)
1900 * @postreset: postreset method (can be NULL)
1902 * Perform standard error handling sequence.
1905 * Kernel thread context (may sleep).
1907 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1908 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1909 ata_postreset_fn_t postreset)
1911 if (!(ap->flags & ATA_FLAG_LOADING)) {
1916 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
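/* How an LLDD typically plugs into the above (a sketch, assuming the
 * standard reset helpers exported by libata-core; a real driver may
 * substitute hardware-specific methods or pass NULL for steps it
 * does not support):
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */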