/*
 * libata-eh.c - libata error handling
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

enum {
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
};

/* Waiting in ->prereset can never be reliable.  It's sometimes nice
 * to wait there but it can't be depended upon; otherwise, we wouldn't
 * be resetting.  Just give it enough time for most drives to spin up.
 */
enum {
	ATA_EH_PRERESET_TIMEOUT		= 10 * HZ,
	ATA_EH_FASTDRAIN_INTERVAL	= 3 * HZ,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10 * HZ,	/* most drives spin up by 10sec */
	10 * HZ,	/* > 99% working drives spin up before 20sec */
	35 * HZ,	/* give > 30 secs of idleness for retarded devices */
	 5 * HZ,	/* and sweet one last chance */
	/* > 1 min has elapsed, give up */
};

static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
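/* Usage sketch (illustrative only, not part of libata): an LLDD's
 * interrupt handler typically clears the description once, then appends
 * fragments as it decodes its status registers.  example_decode_irq()
 * and the PHY bit below are hypothetical.
 */
#if 0
static void example_decode_irq(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	/* caller holds ap->lock */
	ata_ehi_clear_desc(ehi);
	__ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	if (irq_stat & (1 << 4))	/* hypothetical PHY event bit */
		ata_ehi_push_desc(ehi, "PHY RDY changed");
}
#endif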
static void ata_ering_record(struct ata_ering *ering, int is_io,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->is_io = is_io;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
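/* For a concrete map_fn example, see speed_down_verdict_cb() below,
 * which walks this ring to count recent errors per category.
 */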
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_link_for_each_dev(tdev, link)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_link_for_each_dev(tdev, link)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);
	} else
		spin_unlock_wait(ap->lock);

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

 repeat:
		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		__ata_port_for_each_link(link, ap) {
			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such a case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--repeat_cnt) {
				ata_port_printk(ap, KERN_INFO,
					"EH pending after completion, "
					"repeating EH (cnt=%d)\n", repeat_cnt);
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_REPEAT);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		__ata_port_for_each_link(link, ap)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}
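/* Usage sketch (illustrative only, not part of libata): a hypothetical
 * caller that wants to quiesce a port, e.g. before detaching it,
 * schedules EH under the host lock and then waits for it to finish
 * from sleepable context.
 */
#if 0
static void example_quiesce_port(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);		/* may sleep */
}
#endif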
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(ap->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
			       "stat 0x%x host_stat 0x%x\n",
			       qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= AC_ERR_TIMEOUT;
		break;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}

/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	ata_qc_timeout(ata_qc_from_tag(ap, ap->link.active_tag));

	DPRINTK("EXIT\n");
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	unsigned int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			jiffies + ATA_EH_FASTDRAIN_INTERVAL;
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL;
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
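/* Usage sketch (illustrative only): a hotplug-capable LLDD that has
 * detected a PHY event but has no failed qc to attach it to can record
 * the hotplug condition and schedule port-wide EH.  example_phy_event()
 * is hypothetical.
 */
#if 0
static void example_phy_event(struct ata_port *ap)
{
	/* caller holds ap->lock, e.g. from the interrupt handler */
	ata_ehi_hotplugged(&ap->link.eh_info);
	ata_port_schedule_eh(ap);
}
#endif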
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	nr_aborted = ata_port_abort(ap);
	__ata_port_freeze(ap);

	return nr_aborted;
}
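/* Usage sketch (illustrative only): on a fatal, non-command-specific
 * error an LLDD's interrupt handler can abort everything and freeze
 * the port in one call.  EXAMPLE_FATAL_BITS and example_fatal_irq()
 * are hypothetical.
 */
#if 0
static void example_fatal_irq(struct ata_port *ap, u32 irq_stat)
{
	/* caller holds ap->lock */
	if (irq_stat & EXAMPLE_FATAL_BITS) {
		ata_ehi_push_desc(&ap->link.eh_info,
				  "fatal irq_stat 0x%08x", irq_stat);
		ata_port_freeze(ap);	/* aborts all qcs, then freezes */
	}
}
#endif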
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			       unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	/* Reset is represented by combination of actions and EHI
	 * flags.  Suck in all related bits before clearing eh_info to
	 * avoid losing requested action.
	 */
	if (action & ATA_EH_RESET_MASK) {
		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

		/* make sure all reset actions are cleared & clear EHI flags */
		action |= ATA_EH_RESET_MASK;
		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(link, dev, ehi, action);

	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_done(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* if reset is complete, clear all reset actions & reset modifier */
	if (action & ATA_EH_RESET_MASK) {
		action |= ATA_EH_RESET_MASK;
		ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
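/* For reference, the log page 10h layout consumed above: byte 0 carries
 * the failed tag in bits 4:0 (bit 7 set means no NCQ error is pending),
 * bytes 2/3 the status/error registers, bytes 4-6 and 8-10 the low and
 * high LBA halves, byte 7 the device register, bytes 12/13 the sector
 * count, and byte 511 a checksum that makes the whole sector sum to zero.
 */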
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@qc: qc to perform REQUEST_SENSE for (sense data is stored in
 *	     its scsicmd's sense_buffer, SCSI_SENSE_BUFFERSIZE bytes long)
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	unsigned char *sense_buf = qc->scsicmd->sense_buffer;
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	u8 cdb[ATAPI_CDB_LEN];

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where it is -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = qc->result_tf.feature >> 4;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	memset(cdb, 0, ATAPI_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = SCSI_SENSE_BUFFERSIZE;

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATA_PROT_ATAPI_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATA_PROT_ATAPI;
		tf.lbam = (8 * 1024) & 0xff;
		tf.lbah = (8 * 1024) >> 8;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;

	if (serror & SERR_PERSISTENT) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
	}
	if (serror &
	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_HARDRESET;
	}
	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all an LLDD has to do
 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes care
 *	of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
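/* Usage sketch (illustrative only): per the comment above, an NCQ-aware
 * LLDD that sees a device error while commands are queued only needs to
 * flag AC_ERR_DEV and hand the qcs to EH; this function then finds the
 * guilty tag via log page 10h.  example_ncq_dev_error() is hypothetical.
 */
#if 0
static void example_ncq_dev_error(struct ata_port *ap)
{
	/* caller holds ap->lock */
	ap->link.eh_info.err_mask |= AC_ERR_DEV;
	ata_port_abort(ap);
}
#endif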
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_SOFTRESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_SOFTRESET;

	return action;
}

static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
{
	if (err_mask & AC_ERR_ATA_BUS)
		return 1;

	if (err_mask & AC_ERR_TIMEOUT)
		return 2;

	if (is_io) {
		if (err_mask & AC_ERR_HSM)
			return 2;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return 3;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int nr_errors[4];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);

	if (ent->timestamp < arg->since)
		return -1;

	arg->nr_errors[cat]++;
	return 0;
}

/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	Cat-1 is ATA_BUS error for any command.
 *
 *	Cat-2 is TIMEOUT for any command or HSM violation for known
 *	supported commands.
 *
 *	Cat-3 is unclassified DEV error for known supported
 *	commands.
 *
 *	NCQ needs to be turned off if there have been more than 3
 *	Cat-2 + Cat-3 errors during last 10 minutes.
 *
 *	Speed down is necessary if there have been more than 3 Cat-1 +
 *	Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
 *
 *	Falling back to PIO mode is necessary if there have been more
 *	than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;
	if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	return verdict;
}
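/* Worked example: four timeouts (Cat-2) within the last ten minutes give
 * nr_errors[2] == 4, so both the NCQ_OFF rule (Cat-2 + Cat-3 > 3) and the
 * SPEED_DOWN rule (Cat-1 + Cat-2 > 3) fire.  FALLBACK_TO_PIO would still
 * need more than ten errors within the last five minutes, so it stays
 * clear.
 */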
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@is_io: Did the device fail during normal IO?
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
				      unsigned int err_mask)
{
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(is_io, err_mask) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, is_io, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(dev->link) == 0) {
			action |= ATA_EH_HARDRESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_SOFTRESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA.  Consider it only for PATA.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (dev->link->ap->cbl != ATA_CBL_SATA) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_SOFTRESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	ata_ering_clear(&dev->ering);
	return action;
}

/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned int all_err_mask = 0;
	int tag, is_io = 0;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force hardreset and probing */
		ata_ehi_schedule_probe(&ehc->i);
		ehc->i.action |= ATA_EH_HARDRESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
			ehc->i.action &= ~ATA_EH_REVALIDATE;
		}

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			is_io = 1;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_SOFTRESET;
	else if (all_err_mask)
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* if we have offending qcs and the associated failed device */
	if (ehc->i.dev) {
		/* speed down */
		ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
						   all_err_mask);

		/* perform per-dev EH action only on the offending device */
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	DPRINTK("EXIT\n");
}

/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	__ata_port_for_each_link(link, ap)
		ata_eh_link_autopsy(link);
}

/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	int tag, nr_failed = 0;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s\n",
			       ehc->i.err_mask, link->sactive,
			       ehc->i.serror, ehc->i.action, frozen);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s\n",
				ehc->i.err_mask, link->sactive,
				ehc->i.serror, ehc->i.action, frozen);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		static const char *dma_str[] = {
			[DMA_BIDIRECTIONAL]	= "bidi",
			[DMA_TO_DEVICE]		= "out",
			[DMA_FROM_DEVICE]	= "in",
			[DMA_NONE]		= "",
		};
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    qc->dev->link != link || !qc->err_mask)
			continue;

		ata_dev_printk(qc->dev, KERN_ERR,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d cdb 0x%x data %u %s\n"
			"         res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, qc->cdb[0], qc->nbytes,
			dma_str[qc->dma_dir],
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
	}
}

/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	__ata_port_for_each_link(link, ap)
		ata_eh_link_report(link);
}

static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline)
{
	struct ata_device *dev;
	int rc;

	ata_link_for_each_dev(dev, link)
		classes[dev->devno] = ATA_DEV_UNKNOWN;

	rc = reset(link, classes, deadline);
	if (rc)
		return rc;

	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
	 * is complete and convert all ATA_DEV_UNKNOWN to
	 * ATA_DEV_NONE.
	 */
	ata_link_for_each_dev(dev, link)
		if (classes[dev->devno] != ATA_DEV_UNKNOWN)
			break;

	if (dev) {
		ata_link_for_each_dev(dev, link) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN)
				classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	return 0;
}

static int ata_eh_followup_srst_needed(int rc, int classify,
				       const unsigned int *classes)
{
	if (rc == -EAGAIN)
		return 1;
	if (rc != 0)
		return 0;
	if (classify && classes[0] == ATA_DEV_UNKNOWN)
		return 1;
	return 0;
}

static int ata_eh_reset(struct ata_link *link, int classify,
			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned int *classes = ehc->classes;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int try = 0;
	struct ata_device *dev;
	unsigned long deadline;
	unsigned int action;
	ata_reset_fn_t reset;
	int rc;

	/* about to reset */
	ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);

	/* Determine which reset to use and record in ehc->i.action.
	 * prereset() may examine and modify it.
	 */
	action = ehc->i.action;
	ehc->i.action &= ~ATA_EH_RESET_MASK;
	if (softreset && (!hardreset || (!sata_set_spd_needed(link) &&
					 !(action & ATA_EH_HARDRESET))))
		ehc->i.action |= ATA_EH_SOFTRESET;
	else
		ehc->i.action |= ATA_EH_HARDRESET;

	if (prereset) {
		rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
		if (rc) {
			if (rc == -ENOENT) {
				ata_link_printk(link, KERN_DEBUG,
						"port disabled. ignoring.\n");
				ehc->i.action &= ~ATA_EH_RESET_MASK;

				ata_link_for_each_dev(dev, link)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_printk(link, KERN_ERR,
					"prereset failed (errno=%d)\n", rc);
			return rc;
		}
	}

	/* prereset() might have modified ehc->i.action */
	if (ehc->i.action & ATA_EH_HARDRESET)
		reset = hardreset;
	else if (ehc->i.action & ATA_EH_SOFTRESET)
		reset = softreset;
	else {
		/* prereset told us not to reset, bang classes and return */
		ata_link_for_each_dev(dev, link)
			classes[dev->devno] = ATA_DEV_NONE;
		return 0;
	}

	/* did prereset() screw up?  if so, fix up to avoid oopsing */
	if (!reset) {
		ata_link_printk(link, KERN_ERR, "BUG: no reset method, "
				"please report to linux-ide@vger.kernel.org\n");
		dump_stack();
		return -EINVAL;
	}

 retry:
	deadline = jiffies + ata_eh_reset_timeouts[try++];

	/* shut up during boot probing */
	if (verbose)
		ata_link_printk(link, KERN_INFO, "%s resetting link\n",
				reset == softreset ? "soft" : "hard");

	/* mark that this EH session started with reset */
	if (reset == hardreset)
		ehc->i.flags |= ATA_EHI_DID_HARDRESET;
	else
		ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

	rc = ata_do_reset(link, reset, classes, deadline);

	if (reset == hardreset &&
	    ata_eh_followup_srst_needed(rc, classify, classes)) {
		/* okay, let's do follow-up softreset */
		reset = softreset;

		if (!reset) {
			ata_link_printk(link, KERN_ERR,
					"follow-up softreset required "
					"but no softreset available\n");
			return -EINVAL;
		}

		ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK);
		rc = ata_do_reset(link, reset, classes, deadline);

		if (rc == 0 && classify &&
		    classes[0] == ATA_DEV_UNKNOWN) {
			ata_link_printk(link, KERN_ERR,
					"classification failed\n");
			return -EINVAL;
		}
	}

	if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) {
		unsigned long now = jiffies;

		if (time_before(now, deadline)) {
			unsigned long delta = deadline - jiffies;

			ata_link_printk(link, KERN_WARNING, "reset failed "
				"(errno=%d), retrying in %u secs\n",
				rc, (jiffies_to_msecs(delta) + 999) / 1000);

			schedule_timeout_uninterruptible(delta);
		}

		if (reset == hardreset &&
		    try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
			sata_down_spd_limit(link);
		if (hardreset)
			reset = hardreset;
		goto retry;
	}

	if (rc == 0) {
		u32 sstatus;

		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined.  Record the mode.
		 */
		ata_link_for_each_dev(dev, link)
			dev->pio_mode = XFER_PIO_0;

		/* record current link speed */
		if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
			link->sata_spd = (sstatus >> 4) & 0xf;

		if (postreset)
			postreset(link, classes);

		/* reset successful, schedule revalidation */
		ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
		ehc->i.action |= ATA_EH_REVALIDATE;
	}

	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	return rc;
}

static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_link_for_each_dev_reverse(dev, link) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			if (ata_link_offline(link)) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			dev->class = ehc->classes[dev->devno];

			rc = ata_dev_read_id(dev, &dev->class, readid_flags,
					     dev->id);
			switch (rc) {
			case 0:
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and kill the device.
				 */
				ata_eh_thaw_port(ap);
				dev->class = ATA_DEV_UNKNOWN;
				break;
			default:
				dev->class = ATA_DEV_UNKNOWN;
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if (ata_is_host_link(link) && ap->ops->cable_detect &&
	    (ehc->i.flags & ATA_EHI_DID_RESET))
		ap->cbl = ap->ops->cable_detect(ap);

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_link_for_each_dev(dev, link) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto err;

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}

static int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_link_for_each_dev(dev, link)
		if (ata_dev_enabled(dev))
			cnt++;
	return cnt;
}

static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_link_for_each_dev(dev, link)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}

static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* thaw frozen port, resume link and recover failed devices */
	if ((link->ap->pflags & ATA_PFLAG_FROZEN) ||
	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_link_nr_enabled(link))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_link_for_each_dev(dev, link) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}

static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(dev->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_link_offline(dev->link))
			ata_eh_detach_dev(dev);

		/* probe if requested */
		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
		    !(ehc->did_probe_mask & (1 << dev->devno))) {
			ata_eh_detach_dev(dev);
			ata_dev_init(dev);

			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			ehc->did_probe_mask |= (1 << dev->devno);
			ehc->i.action |= ATA_EH_SOFTRESET;
		}

		return 1;
	} else {
		/* soft didn't work?  be haaaaard */
		if (ehc->i.flags & ATA_EHI_DID_RESET)
			ehc->i.action |= ATA_EH_HARDRESET;
		else
			ehc->i.action |= ATA_EH_SOFTRESET;

		return 0;
	}
}

/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retries and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset,
			  struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int nr_failed_devs, nr_disabled_devs;
	int reset, rc;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		ata_link_for_each_dev(dev, link) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			if (!ata_dev_enabled(dev) &&
			    ((ehc->i.probe_mask & (1 << dev->devno)) &&
			     !(ehc->did_probe_mask & (1 << dev->devno)))) {
				ata_eh_detach_dev(dev);
				ata_dev_init(dev);
				ehc->did_probe_mask |= (1 << dev->devno);
				ehc->i.action |= ATA_EH_SOFTRESET;
			}
		}
	}

 retry:
	rc = 0;
	nr_failed_devs = 0;
	nr_disabled_devs = 0;
	reset = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		/* do we need to reset? */
		if (ehc->i.action & ATA_EH_RESET_MASK)
			reset = 1;

		ata_link_for_each_dev(dev, link)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	if (reset) {
		ata_eh_freeze_port(ap);

		ata_port_for_each_link(link, ap) {
			struct ata_eh_context *ehc = &link->eh_context;

			if (!(ehc->i.action & ATA_EH_RESET_MASK))
				continue;

			rc = ata_eh_reset(link, ata_link_nr_vacant(link),
					  prereset, softreset, hardreset,
					  postreset);
			if (rc) {
				ata_link_printk(link, KERN_ERR,
						"reset failed, giving up\n");
				goto out;
			}
		}

		ata_eh_thaw_port(ap);
	}

	/* the rest */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto dev_fail;

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto dev_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

 dev_fail:
		nr_failed_devs++;
		if (ata_eh_handle_dev_fail(dev, rc))
			nr_disabled_devs++;

		if (ap->pflags & ATA_PFLAG_FROZEN)
			break;
	}

	if (nr_failed_devs) {
		if (nr_failed_devs != nr_disabled_devs) {
			ata_port_printk(ap, KERN_WARNING, "failed to recover "
					"some devices, retrying in 5 secs\n");
			ssleep(5);
		} else {
			/* no device left to recover, repeat fast */
			msleep(500);
		}

		goto retry;
	}

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->err_mask & AC_ERR_INVALID)
				ata_eh_qc_complete(qc);
			else
				ata_eh_qc_retry(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_link_for_each_dev(dev, &ap->link)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
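/* Usage sketch (illustrative only): a typical LLDD wires its reset
 * methods into ata_do_eh() from its ->error_handler callback.  The
 * standard helpers shown here (ata_std_prereset() and friends) are one
 * common choice; drivers substitute their own methods as needed.
 */
#if 0
static void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}
#endif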
#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */