[SCSI] libsas: execute transport link resets with libata-eh via host workqueue
drivers/ata/libata-eh.c
index a93247c..c61316e 100644
 
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
+#include <linux/export.h>
 #include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
 #include "../scsi/scsi_transport_api.h"
 
 #include <linux/libata.h>
@@ -56,6 +58,7 @@ enum {
        /* error flags */
        ATA_EFLAG_IS_IO                 = (1 << 0),
        ATA_EFLAG_DUBIOUS_XFER          = (1 << 1),
+       ATA_EFLAG_OLD_ER                = (1 << 31),
 
        /* error categories */
        ATA_ECAT_NONE                   = 0,
@@ -82,6 +85,10 @@ enum {
        ATA_EH_FASTDRAIN_INTERVAL       =  3000,
 
        ATA_EH_UA_TRIES                 = 5,
+
+       /* probe speed down parameters, see ata_eh_schedule_probe() */
+       ATA_EH_PROBE_TRIAL_INTERVAL     = 60000,        /* 1 min */
+       ATA_EH_PROBE_TRIALS             = 2,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -105,6 +112,13 @@ static const unsigned long ata_eh_identify_timeouts[] = {
        ULONG_MAX,
 };
 
+static const unsigned long ata_eh_flush_timeouts[] = {
+       15000,  /* be generous with flush */
+       15000,  /* ditto */
+       30000,  /* and even more generous */
+       ULONG_MAX,
+};
+
 static const unsigned long ata_eh_other_timeouts[] = {
         5000,  /* same rationale as identify timeout */
        10000,  /* ditto */
@@ -142,6 +156,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
          .timeouts = ata_eh_other_timeouts, },
+       { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
+         .timeouts = ata_eh_flush_timeouts },
 };
 #undef CMDS
 
@@ -382,14 +398,9 @@ static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
        return NULL;
 }
 
-static void ata_ering_clear(struct ata_ering *ering)
-{
-       memset(ering, 0, sizeof(*ering));
-}
-
-static int ata_ering_map(struct ata_ering *ering,
-                        int (*map_fn)(struct ata_ering_entry *, void *),
-                        void *arg)
+int ata_ering_map(struct ata_ering *ering,
+                 int (*map_fn)(struct ata_ering_entry *, void *),
+                 void *arg)
 {
        int idx, rc = 0;
        struct ata_ering_entry *ent;
@@ -408,6 +419,17 @@ static int ata_ering_map(struct ata_ering *ering,
        return rc;
 }
 
+int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+       ent->eflags |= ATA_EFLAG_OLD_ER;
+       return 0;
+}
+
+static void ata_ering_clear(struct ata_ering *ering)
+{
+       ata_ering_map(ering, ata_ering_clear_cb, NULL);
+}
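
With this change ata_ering_clear() no longer wipes the ring; it marks every existing entry ATA_EFLAG_OLD_ER so that stale records survive for probe bookkeeping while verdict callbacks skip them (speed_down_verdict_cb() below bails out on the flag, and the ATA_EH_PROBE_TRIAL_* constants above bound how many fresh probe failures are tolerated). A minimal, illustrative sketch of such a filtering callback; the name count_recent_errors_cb is hypothetical, the real consumers being ata_eh_schedule_probe() and speed_down_verdict_cb():

/* Illustrative map callback: count only entries recorded after the
 * last ata_ering_clear() and newer than ATA_EH_PROBE_TRIAL_INTERVAL.
 */
static int count_recent_errors_cb(struct ata_ering_entry *ent, void *void_arg)
{
	int *count = void_arg;
	unsigned long cutoff =
		jiffies - msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);

	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
	    time_before(ent->timestamp, cutoff))
		return -1;	/* stop early, as speed_down_verdict_cb() does */

	(*count)++;
	return 0;
}

/* usage: int n = 0; ata_ering_map(&dev->ering, count_recent_errors_cb, &n); */
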
+
 static unsigned int ata_eh_dev_action(struct ata_device *dev)
 {
        struct ata_eh_context *ehc = &dev->link->eh_context;
@@ -422,7 +444,7 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 
        if (!dev) {
                ehi->action &= ~action;
-               ata_link_for_each_dev(tdev, link)
+               ata_for_each_dev(tdev, link, ALL)
                        ehi->dev_action[tdev->devno] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
@@ -430,7 +452,7 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 
                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
-                       ata_link_for_each_dev(tdev, link)
+                       ata_for_each_dev(tdev, link, ALL)
                                ehi->dev_action[tdev->devno] |=
                                        ehi->action & action;
                        ehi->action &= ~action;
@@ -442,6 +464,41 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 }
 
 /**
+ *     ata_eh_acquire - acquire EH ownership
+ *     @ap: ATA port to acquire EH ownership for
+ *
+ *     Acquire EH ownership for @ap.  This is the basic exclusion
+ *     mechanism for ports sharing a host.  Only one port hanging off
+ *     the same host can claim the ownership of EH.
+ *
+ *     LOCKING:
+ *     EH context.
+ */
+void ata_eh_acquire(struct ata_port *ap)
+{
+       mutex_lock(&ap->host->eh_mutex);
+       WARN_ON_ONCE(ap->host->eh_owner);
+       ap->host->eh_owner = current;
+}
+
+/**
+ *     ata_eh_release - release EH ownership
+ *     @ap: ATA port to release EH ownership for
+ *
+ *     Release EH ownership for @ap.  The caller must have acquired EH
+ *     ownership using ata_eh_acquire() previously.
+ *
+ *     LOCKING:
+ *     EH context.
+ */
+void ata_eh_release(struct ata_port *ap)
+{
+       WARN_ON_ONCE(ap->host->eh_owner != current);
+       ap->host->eh_owner = NULL;
+       mutex_unlock(&ap->host->eh_mutex);
+}
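
Ownership brackets a whole EH pass for the port and, as the reset retry path further down shows, is dropped around long sleeps so that sibling ports on the same host can run their own EH in the meantime. A condensed, illustrative sketch of the calling pattern (the helper name is hypothetical; the real callers are ata_scsi_port_error_handler() and ata_eh_reset() below):

static void eh_ownership_pattern(struct ata_port *ap, unsigned long delta)
{
	ata_eh_acquire(ap);		/* one EH owner per ATA host */

	/* ... invoke ->error_handler(), autopsy, resets, revalidation ... */

	/* give up ownership across long waits, cf. the retry sleep in
	 * ata_eh_reset() */
	ata_eh_release(ap);
	delta = schedule_timeout_uninterruptible(delta);
	ata_eh_acquire(ap);

	/* ... finish recovery ... */
	ata_eh_release(ap);		/* EH pass complete */
}
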
+
+/**
  *     ata_scsi_timed_out - SCSI layer time out callback
  *     @cmd: timed out SCSI command
  *
@@ -491,6 +548,31 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
        return ret;
 }
 
+static void ata_eh_unload(struct ata_port *ap)
+{
+       struct ata_link *link;
+       struct ata_device *dev;
+       unsigned long flags;
+
+       /* Restore SControl IPM and SPD for the next driver and
+        * disable attached devices.
+        */
+       ata_for_each_link(link, ap, PMP_FIRST) {
+               sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
+               ata_for_each_dev(dev, link, ALL)
+                       ata_dev_disable(dev);
+       }
+
+       /* freeze and set UNLOADED */
+       spin_lock_irqsave(ap->lock, flags);
+
+       ata_port_freeze(ap);                    /* won't be thawed */
+       ap->pflags &= ~ATA_PFLAG_EH_PENDING;    /* clear pending from freeze */
+       ap->pflags |= ATA_PFLAG_UNLOADED;
+
+       spin_unlock_irqrestore(ap->lock, flags);
+}
+
 /**
  *     ata_scsi_error - SCSI layer error handler callback
  *     @host: SCSI host on which error occurred
@@ -506,19 +588,51 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 void ata_scsi_error(struct Scsi_Host *host)
 {
        struct ata_port *ap = ata_shost_to_port(host);
-       int i;
        unsigned long flags;
+       LIST_HEAD(eh_work_q);
 
        DPRINTK("ENTER\n");
 
-       /* synchronize with port task */
-       ata_port_flush_task(ap);
+       spin_lock_irqsave(host->host_lock, flags);
+       list_splice_init(&host->eh_cmd_q, &eh_work_q);
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
+
+       /* If we timed out while racing normal completion and there is
+          nothing to recover (nr_timedout == 0), why exactly are we
+          doing error recovery? */
+       ata_scsi_port_error_handler(host, ap);
+
+       /* finish or retry handled scmd's and clean up */
+       WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+
+       DPRINTK("EXIT\n");
+}
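
The split into ata_scsi_cmd_error_handler() and ata_scsi_port_error_handler() is what lets a transport driver such as libsas (per the commit subject) drive libata EH from its own context, for example a host workqueue, instead of going through ata_scsi_error(). A hedged sketch of that usage follows; everything except the two exported helpers is illustrative, and the command list is spliced off the SCSI host's eh_cmd_q under host_lock just as ata_scsi_error() does above:

static void example_sas_ata_eh(struct Scsi_Host *shost, struct ata_port *ap)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* first half: sort failed commands onto ap->eh_done_q */
	ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);

	/* second half: reset/revalidate the port and flush eh_done_q */
	ata_scsi_port_error_handler(shost, ap);
}
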
+
+/**
+ * ata_scsi_cmd_error_handler - error callback for a list of commands
+ * @host:      scsi host containing the port
+ * @ap:                ATA port within the host
+ * @eh_work_q: list of commands to process
+ *
+ * Process the given list of commands and return those finished to
+ * ap->eh_done_q.  This is the first part of the libata error handler:
+ * it deals with the given list of failed commands, while port recovery
+ * is handled separately by ata_scsi_port_error_handler().
+ */
+void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+                               struct list_head *eh_work_q)
+{
+       int i;
+       unsigned long flags;
+
+       /* make sure sff pio task is not running */
+       ata_sff_flush_pio_task(ap);
 
        /* synchronize with host lock and sort out timeouts */
 
        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
-        * Both cmpletions can race against SCSI timeout.  When normal
+        * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
@@ -534,7 +648,19 @@ void ata_scsi_error(struct Scsi_Host *host)
 
                spin_lock_irqsave(ap->lock, flags);
 
-               list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
+               /* This must occur under the ap->lock as we don't want
+                  a polled recovery to race the real interrupt handler
+
+                  The lost_interrupt handler checks for any completed but
+                  non-notified command and completes much like an IRQ handler.
+
+                  We then fall into the error recovery code which will treat
+                  this as if normal completion won the race */
+
+               if (ap->ops->lost_interrupt)
+                       ap->ops->lost_interrupt(ap);
+
+               list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
                        struct ata_queued_cmd *qc;
 
                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
@@ -578,11 +704,28 @@ void ata_scsi_error(struct Scsi_Host *host)
        } else
                spin_unlock_wait(ap->lock);
 
- repeat:
+}
+EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
+
+/**
+ * ata_scsi_port_error_handler - recover the port after the commands
+ * @host:      SCSI host containing the port
+ * @ap:                the ATA port
+ *
+ * Handle the recovery of the port @ap after all the commands
+ * have been recovered.
+ */
+void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+{
+       unsigned long flags;
+
        /* invoke error handler */
        if (ap->ops->error_handler) {
                struct ata_link *link;
 
+               /* acquire EH ownership */
+               ata_eh_acquire(ap);
+ repeat:
                /* kill fast drain timer */
                del_timer_sync(&ap->fastdrain_timer);
 
@@ -592,7 +735,7 @@ void ata_scsi_error(struct Scsi_Host *host)
                /* fetch & clear EH info */
                spin_lock_irqsave(ap->lock, flags);
 
-               __ata_port_for_each_link(link, ap) {
+               ata_for_each_link(link, ap, HOST_FIRST) {
                        struct ata_eh_context *ehc = &link->eh_context;
                        struct ata_device *dev;
 
@@ -600,16 +743,13 @@ void ata_scsi_error(struct Scsi_Host *host)
                        link->eh_context.i = link->eh_info;
                        memset(&link->eh_info, 0, sizeof(link->eh_info));
 
-                       ata_link_for_each_dev(dev, link) {
+                       ata_for_each_dev(dev, link, ENABLED) {
                                int devno = dev->devno;
 
                                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
                                if (ata_ncq_enabled(dev))
                                        ehc->saved_ncq_enabled |= 1 << devno;
                        }
-
-                       /* set last reset timestamp to some time in the past */
-                       ehc->last_reset = jiffies - 60 * HZ;
                }
 
                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -621,13 +761,18 @@ void ata_scsi_error(struct Scsi_Host *host)
                /* invoke EH, skip if unloading or suspended */
                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
                        ap->ops->error_handler(ap);
-               else
+               else {
+                       /* if unloading, commence suicide */
+                       if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
+                           !(ap->pflags & ATA_PFLAG_UNLOADED))
+                               ata_eh_unload(ap);
                        ata_eh_finish(ap);
+               }
 
                /* process port suspend request */
                ata_eh_handle_port_suspend(ap);
 
-               /* Exception might have happend after ->error_handler
+               /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
@@ -638,13 +783,14 @@ void ata_scsi_error(struct Scsi_Host *host)
                                spin_unlock_irqrestore(ap->lock, flags);
                                goto repeat;
                        }
-                       ata_port_printk(ap, KERN_ERR, "EH pending after %d "
-                                       "tries, giving up\n", ATA_EH_MAX_TRIES);
+                       ata_port_err(ap,
+                                    "EH pending after %d tries, giving up\n",
+                                    ATA_EH_MAX_TRIES);
                        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                }
 
                /* this run is complete, make sure EH info is clear */
-               __ata_port_for_each_link(link, ap)
+               ata_for_each_link(link, ap, HOST_FIRST)
                        memset(&link->eh_info, 0, sizeof(link->eh_info));
 
                /* Clear host_eh_scheduled while holding ap->lock such
@@ -655,14 +801,12 @@ void ata_scsi_error(struct Scsi_Host *host)
                host->host_eh_scheduled = 0;
 
                spin_unlock_irqrestore(ap->lock, flags);
+               ata_eh_release(ap);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }
 
-       /* finish or retry handled scmd's and clean up */
-       WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
        scsi_eh_flush_done_q(&ap->eh_done_q);
 
        /* clean up */
@@ -671,10 +815,10 @@ void ata_scsi_error(struct Scsi_Host *host)
        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-               queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
+               schedule_delayed_work(&ap->hotplug_task, 0);
 
        if (ap->pflags & ATA_PFLAG_RECOVERED)
-               ata_port_printk(ap, KERN_INFO, "EH complete\n");
+               ata_port_info(ap, "EH complete\n");
 
        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
 
@@ -683,9 +827,8 @@ void ata_scsi_error(struct Scsi_Host *host)
        wake_up_all(&ap->eh_wait_q);
 
        spin_unlock_irqrestore(ap->lock, flags);
-
-       DPRINTK("EXIT\n");
 }
+EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
 
 /**
  *     ata_port_wait_eh - Wait for the currently pending EH to complete
@@ -716,10 +859,11 @@ void ata_port_wait_eh(struct ata_port *ap)
 
        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->scsi_host)) {
-               msleep(10);
+               ata_msleep(ap, 10);
                goto retry;
        }
 }
+EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
@@ -823,6 +967,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct request_queue *q = qc->scsicmd->device->request_queue;
+       unsigned long flags;
 
        WARN_ON(!ap->ops->error_handler);
 
@@ -834,7 +980,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
+       spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
+       spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -953,7 +1101,9 @@ static void __ata_port_freeze(struct ata_port *ap)
  *     ata_port_freeze - abort & freeze port
  *     @ap: ATA port to freeze
  *
- *     Abort and freeze @ap.
+ *     Abort and freeze @ap.  The freeze operation must be called
+ *     first, because some hardware requires special operations
+ *     before the taskfile registers are accessible.
  *
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
@@ -967,8 +1117,8 @@ int ata_port_freeze(struct ata_port *ap)
 
        WARN_ON(!ap->ops->error_handler);
 
-       nr_aborted = ata_port_abort(ap);
        __ata_port_freeze(ap);
+       nr_aborted = ata_port_abort(ap);
 
        return nr_aborted;
 }
@@ -1025,7 +1175,7 @@ int sata_async_notification(struct ata_port *ap)
                struct ata_link *link;
 
                /* check and notify ATAPI AN */
-               ata_port_for_each_link(link, ap) {
+               ata_for_each_link(link, ap, EDGE) {
                        if (!(sntf & (1 << link->pmp)))
                                continue;
 
@@ -1149,6 +1299,32 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 }
 
 /**
+ *     ata_dev_disable - disable ATA device
+ *     @dev: ATA device to disable
+ *
+ *     Disable @dev.
+ *
+ *     LOCKING:
+ *     EH context.
+ */
+void ata_dev_disable(struct ata_device *dev)
+{
+       if (!ata_dev_enabled(dev))
+               return;
+
+       if (ata_msg_drv(dev->link->ap))
+               ata_dev_warn(dev, "disabled\n");
+       ata_acpi_on_disable(dev);
+       ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
+       dev->class++;
+
+       /* From now till the next successful probe, ering is used to
+        * track probe failures.  Clear accumulated device error info.
+        */
+       ata_ering_clear(&dev->ering);
+}
+
+/**
  *     ata_eh_detach_dev - detach ATA device
  *     @dev: ATA device to detach
  *
@@ -1161,6 +1337,7 @@ void ata_eh_detach_dev(struct ata_device *dev)
 {
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
+       struct ata_eh_context *ehc = &link->eh_context;
        unsigned long flags;
 
        ata_dev_disable(dev);
@@ -1174,9 +1351,11 @@ void ata_eh_detach_dev(struct ata_device *dev)
                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
        }
 
-       /* clear per-dev EH actions */
+       /* clear per-dev EH info */
        ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
        ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
+       ehc->saved_xfer_mode[dev->devno] = 0;
+       ehc->saved_ncq_enabled &= ~(1 << dev->devno);
 
        spin_unlock_irqrestore(ap->lock, flags);
 }
@@ -1206,7 +1385,10 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
 
        ata_eh_clear_action(link, dev, ehi, action);
 
-       if (!(ehc->i.flags & ATA_EHI_QUIET))
+       /* About to take EH action, set RECOVERED.  Ignore actions on
+        * slave links as master will do them again.
+        */
+       if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
                ap->pflags |= ATA_PFLAG_RECOVERED;
 
        spin_unlock_irqrestore(ap->lock, flags);
@@ -1336,8 +1518,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        for (i = 0; i < ATA_SECT_SIZE; i++)
                csum += buf[i];
        if (csum)
-               ata_dev_printk(dev, KERN_WARNING,
-                              "invalid checksum 0x%x on log page 10h\n", csum);
+               ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
+                            csum);
 
        if (buf[0] & 0x80)
                return -ENOENT;
@@ -1479,9 +1661,9 @@ static void ata_eh_analyze_serror(struct ata_link *link)
         * host links.  For disabled PMP links, only N bit is
         * considered as X bit is left at 1 for link plugging.
         */
-       hotplug_mask = 0;
-
-       if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
+       if (link->lpm_policy > ATA_LPM_MAX_POWER)
+               hotplug_mask = 0;       /* hotplug doesn't work w/ LPM */
+       else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
                hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
        else
                hotplug_mask = SERR_PHYRDY_CHG;
@@ -1534,16 +1716,17 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        }
 
        /* okay, this error is ours */
+       memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
-               ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
-                               "(errno=%d)\n", rc);
+               ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
+                            rc);
                return;
        }
 
        if (!(link->sactive & (1 << tag))) {
-               ata_link_printk(link, KERN_ERR, "log page 10h reported "
-                               "inactive tag %d\n", tag);
+               ata_link_err(link, "log page 10h reported inactive tag %d\n",
+                            tag);
                return;
        }
 
@@ -1562,7 +1745,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
  *
  *     Analyze taskfile of @qc and further determine cause of
  *     failure.  This function also requests ATAPI sense data if
- *     avaliable.
+ *     available.
  *
  *     LOCKING:
  *     Kernel thread context (may sleep).
@@ -1660,7 +1843,7 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
        struct speed_down_verdict_arg *arg = void_arg;
        int cat;
 
-       if (ent->timestamp < arg->since)
+       if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
                return -1;
 
        cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
@@ -1713,7 +1896,7 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
  *        occurred during last 5 mins, NCQ_OFF.
  *
  *     3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
- *        ocurred during last 5 mins, FALLBACK_TO_PIO
+ *        occurred during last 5 mins, FALLBACK_TO_PIO
  *
  *     4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
  *        during last 10 mins, NCQ_OFF.
@@ -1808,15 +1991,14 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
            (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
                           ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
                dev->flags |= ATA_DFLAG_NCQ_OFF;
-               ata_dev_printk(dev, KERN_WARNING,
-                              "NCQ disabled due to excessive errors\n");
+               ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
                goto done;
        }
 
        /* speed down? */
        if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
                /* speed down SATA link speed if possible */
-               if (sata_down_spd_limit(link) == 0) {
+               if (sata_down_spd_limit(link, 0) == 0) {
                        action |= ATA_EH_RESET;
                        goto done;
                }
@@ -1938,8 +2120,9 @@ static void ata_eh_link_autopsy(struct ata_link *link)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
 
                /* determine whether the command is worth retrying */
-               if (!(qc->err_mask & AC_ERR_INVALID) &&
-                   ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
+               if (qc->flags & ATA_QCFLAG_IO ||
+                   (!(qc->err_mask & AC_ERR_INVALID) &&
+                    qc->err_mask != AC_ERR_DEV))
                        qc->flags |= ATA_QCFLAG_RETRY;
 
                /* accumulate error info */
@@ -1999,7 +2182,7 @@ void ata_eh_autopsy(struct ata_port *ap)
 {
        struct ata_link *link;
 
-       ata_port_for_each_link(link, ap)
+       ata_for_each_link(link, ap, EDGE)
                ata_eh_link_autopsy(link);
 
        /* Handle the frigging slave link.  Autopsy is done similarly
@@ -2010,8 +2193,13 @@ void ata_eh_autopsy(struct ata_port *ap)
                struct ata_eh_context *mehc = &ap->link.eh_context;
                struct ata_eh_context *sehc = &ap->slave_link->eh_context;
 
+               /* transfer control flags from master to slave */
+               sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
+
+               /* perform autopsy on the slave link */
                ata_eh_link_autopsy(ap->slave_link);
 
+               /* transfer actions from slave to master and clear slave */
                ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
                mehc->i.action          |= sehc->i.action;
                mehc->i.dev_action[1]   |= sehc->i.dev_action[1];
@@ -2027,6 +2215,117 @@ void ata_eh_autopsy(struct ata_port *ap)
 }
 
 /**
+ *     ata_get_cmd_descript - get description for ATA command
+ *     @command: ATA command code to get description for
+ *
+ *     Return a textual description of the given command, or NULL if the
+ *     command is not known.
+ *
+ *     LOCKING:
+ *     None
+ */
+const char *ata_get_cmd_descript(u8 command)
+{
+#ifdef CONFIG_ATA_VERBOSE_ERROR
+       static const struct
+       {
+               u8 command;
+               const char *text;
+       } cmd_descr[] = {
+               { ATA_CMD_DEV_RESET,            "DEVICE RESET" },
+               { ATA_CMD_CHK_POWER,            "CHECK POWER MODE" },
+               { ATA_CMD_STANDBY,              "STANDBY" },
+               { ATA_CMD_IDLE,                 "IDLE" },
+               { ATA_CMD_EDD,                  "EXECUTE DEVICE DIAGNOSTIC" },
+               { ATA_CMD_DOWNLOAD_MICRO,       "DOWNLOAD MICROCODE" },
+               { ATA_CMD_NOP,                  "NOP" },
+               { ATA_CMD_FLUSH,                "FLUSH CACHE" },
+               { ATA_CMD_FLUSH_EXT,            "FLUSH CACHE EXT" },
+               { ATA_CMD_ID_ATA,               "IDENTIFY DEVICE" },
+               { ATA_CMD_ID_ATAPI,             "IDENTIFY PACKET DEVICE" },
+               { ATA_CMD_SERVICE,              "SERVICE" },
+               { ATA_CMD_READ,                 "READ DMA" },
+               { ATA_CMD_READ_EXT,             "READ DMA EXT" },
+               { ATA_CMD_READ_QUEUED,          "READ DMA QUEUED" },
+               { ATA_CMD_READ_STREAM_EXT,      "READ STREAM EXT" },
+               { ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
+               { ATA_CMD_WRITE,                "WRITE DMA" },
+               { ATA_CMD_WRITE_EXT,            "WRITE DMA EXT" },
+               { ATA_CMD_WRITE_QUEUED,         "WRITE DMA QUEUED EXT" },
+               { ATA_CMD_WRITE_STREAM_EXT,     "WRITE STREAM EXT" },
+               { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
+               { ATA_CMD_WRITE_FUA_EXT,        "WRITE DMA FUA EXT" },
+               { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
+               { ATA_CMD_FPDMA_READ,           "READ FPDMA QUEUED" },
+               { ATA_CMD_FPDMA_WRITE,          "WRITE FPDMA QUEUED" },
+               { ATA_CMD_PIO_READ,             "READ SECTOR(S)" },
+               { ATA_CMD_PIO_READ_EXT,         "READ SECTOR(S) EXT" },
+               { ATA_CMD_PIO_WRITE,            "WRITE SECTOR(S)" },
+               { ATA_CMD_PIO_WRITE_EXT,        "WRITE SECTOR(S) EXT" },
+               { ATA_CMD_READ_MULTI,           "READ MULTIPLE" },
+               { ATA_CMD_READ_MULTI_EXT,       "READ MULTIPLE EXT" },
+               { ATA_CMD_WRITE_MULTI,          "WRITE MULTIPLE" },
+               { ATA_CMD_WRITE_MULTI_EXT,      "WRITE MULTIPLE EXT" },
+               { ATA_CMD_WRITE_MULTI_FUA_EXT,  "WRITE MULTIPLE FUA EXT" },
+               { ATA_CMD_SET_FEATURES,         "SET FEATURES" },
+               { ATA_CMD_SET_MULTI,            "SET MULTIPLE MODE" },
+               { ATA_CMD_VERIFY,               "READ VERIFY SECTOR(S)" },
+               { ATA_CMD_VERIFY_EXT,           "READ VERIFY SECTOR(S) EXT" },
+               { ATA_CMD_WRITE_UNCORR_EXT,     "WRITE UNCORRECTABLE EXT" },
+               { ATA_CMD_STANDBYNOW1,          "STANDBY IMMEDIATE" },
+               { ATA_CMD_IDLEIMMEDIATE,        "IDLE IMMEDIATE" },
+               { ATA_CMD_SLEEP,                "SLEEP" },
+               { ATA_CMD_INIT_DEV_PARAMS,      "INITIALIZE DEVICE PARAMETERS" },
+               { ATA_CMD_READ_NATIVE_MAX,      "READ NATIVE MAX ADDRESS" },
+               { ATA_CMD_READ_NATIVE_MAX_EXT,  "READ NATIVE MAX ADDRESS EXT" },
+               { ATA_CMD_SET_MAX,              "SET MAX ADDRESS" },
+               { ATA_CMD_SET_MAX_EXT,          "SET MAX ADDRESS EXT" },
+               { ATA_CMD_READ_LOG_EXT,         "READ LOG EXT" },
+               { ATA_CMD_WRITE_LOG_EXT,        "WRITE LOG EXT" },
+               { ATA_CMD_READ_LOG_DMA_EXT,     "READ LOG DMA EXT" },
+               { ATA_CMD_WRITE_LOG_DMA_EXT,    "WRITE LOG DMA EXT" },
+               { ATA_CMD_TRUSTED_RCV,          "TRUSTED RECEIVE" },
+               { ATA_CMD_TRUSTED_RCV_DMA,      "TRUSTED RECEIVE DMA" },
+               { ATA_CMD_TRUSTED_SND,          "TRUSTED SEND" },
+               { ATA_CMD_TRUSTED_SND_DMA,      "TRUSTED SEND DMA" },
+               { ATA_CMD_PMP_READ,             "READ BUFFER" },
+               { ATA_CMD_PMP_WRITE,            "WRITE BUFFER" },
+               { ATA_CMD_CONF_OVERLAY,         "DEVICE CONFIGURATION OVERLAY" },
+               { ATA_CMD_SEC_SET_PASS,         "SECURITY SET PASSWORD" },
+               { ATA_CMD_SEC_UNLOCK,           "SECURITY UNLOCK" },
+               { ATA_CMD_SEC_ERASE_PREP,       "SECURITY ERASE PREPARE" },
+               { ATA_CMD_SEC_ERASE_UNIT,       "SECURITY ERASE UNIT" },
+               { ATA_CMD_SEC_FREEZE_LOCK,      "SECURITY FREEZE LOCK" },
+               { ATA_CMD_SEC_DISABLE_PASS,     "SECURITY DISABLE PASSWORD" },
+               { ATA_CMD_CONFIG_STREAM,        "CONFIGURE STREAM" },
+               { ATA_CMD_SMART,                "SMART" },
+               { ATA_CMD_MEDIA_LOCK,           "DOOR LOCK" },
+               { ATA_CMD_MEDIA_UNLOCK,         "DOOR UNLOCK" },
+               { ATA_CMD_DSM,                  "DATA SET MANAGEMENT" },
+               { ATA_CMD_CHK_MED_CRD_TYP,      "CHECK MEDIA CARD TYPE" },
+               { ATA_CMD_CFA_REQ_EXT_ERR,      "CFA REQUEST EXTENDED ERROR" },
+               { ATA_CMD_CFA_WRITE_NE,         "CFA WRITE SECTORS WITHOUT ERASE" },
+               { ATA_CMD_CFA_TRANS_SECT,       "CFA TRANSLATE SECTOR" },
+               { ATA_CMD_CFA_ERASE,            "CFA ERASE SECTORS" },
+               { ATA_CMD_CFA_WRITE_MULT_NE,    "CFA WRITE MULTIPLE WITHOUT ERASE" },
+               { ATA_CMD_READ_LONG,            "READ LONG (with retries)" },
+               { ATA_CMD_READ_LONG_ONCE,       "READ LONG (without retries)" },
+               { ATA_CMD_WRITE_LONG,           "WRITE LONG (with retries)" },
+               { ATA_CMD_WRITE_LONG_ONCE,      "WRITE LONG (without retries)" },
+               { ATA_CMD_RESTORE,              "RECALIBRATE" },
+               { 0,                            NULL } /* terminate list */
+       };
+
+       unsigned int i;
+       for (i = 0; cmd_descr[i].text; i++)
+               if (cmd_descr[i].command == command)
+                       return cmd_descr[i].text;
+#endif
+
+       return NULL;
+}
+
+/**
  *     ata_eh_link_report - report error handling to user
  *     @link: ATA link EH is going on
  *
@@ -2077,23 +2376,24 @@ static void ata_eh_link_report(struct ata_link *link)
                         ap->eh_tries);
 
        if (ehc->i.dev) {
-               ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
-                              "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
-                              ehc->i.err_mask, link->sactive, ehc->i.serror,
-                              ehc->i.action, frozen, tries_buf);
+               ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
+                           "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+                           ehc->i.err_mask, link->sactive, ehc->i.serror,
+                           ehc->i.action, frozen, tries_buf);
                if (desc)
-                       ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
+                       ata_dev_err(ehc->i.dev, "%s\n", desc);
        } else {
-               ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
-                               "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
-                               ehc->i.err_mask, link->sactive, ehc->i.serror,
-                               ehc->i.action, frozen, tries_buf);
+               ata_link_err(link, "exception Emask 0x%x "
+                            "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+                            ehc->i.err_mask, link->sactive, ehc->i.serror,
+                            ehc->i.action, frozen, tries_buf);
                if (desc)
-                       ata_link_printk(link, KERN_ERR, "%s\n", desc);
+                       ata_link_err(link, "%s\n", desc);
        }
 
+#ifdef CONFIG_ATA_VERBOSE_ERROR
        if (ehc->i.serror)
-               ata_link_printk(link, KERN_ERR,
+               ata_link_err(link,
                  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
                  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
                  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
@@ -2112,6 +2412,7 @@ static void ata_eh_link_report(struct ata_link *link)
                  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
                  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
                  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
+#endif
 
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
@@ -2143,16 +2444,25 @@ static void ata_eh_link_report(struct ata_link *link)
                                 dma_str[qc->dma_dir]);
                }
 
-               if (ata_is_atapi(qc->tf.protocol))
-                       snprintf(cdb_buf, sizeof(cdb_buf),
+               if (ata_is_atapi(qc->tf.protocol)) {
+                       if (qc->scsicmd)
+                               scsi_print_command(qc->scsicmd);
+                       else
+                               snprintf(cdb_buf, sizeof(cdb_buf),
                                 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
                                 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
                                 cdb[0], cdb[1], cdb[2], cdb[3],
                                 cdb[4], cdb[5], cdb[6], cdb[7],
                                 cdb[8], cdb[9], cdb[10], cdb[11],
                                 cdb[12], cdb[13], cdb[14], cdb[15]);
+               } else {
+                       const char *descr = ata_get_cmd_descript(cmd->command);
+                       if (descr)
+                               ata_dev_err(qc->dev, "failed command: %s\n",
+                                           descr);
+               }
 
-               ata_dev_printk(qc->dev, KERN_ERR,
+               ata_dev_err(qc->dev,
                        "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
                        "tag %d%s\n         %s"
                        "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2169,14 +2479,13 @@ static void ata_eh_link_report(struct ata_link *link)
                        res->device, qc->err_mask, ata_err_string(qc->err_mask),
                        qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
 
+#ifdef CONFIG_ATA_VERBOSE_ERROR
                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
                                    ATA_ERR)) {
                        if (res->command & ATA_BUSY)
-                               ata_dev_printk(qc->dev, KERN_ERR,
-                                 "status: { Busy }\n");
+                               ata_dev_err(qc->dev, "status: { Busy }\n");
                        else
-                               ata_dev_printk(qc->dev, KERN_ERR,
-                                 "status: { %s%s%s%s}\n",
+                               ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
                                  res->command & ATA_DRDY ? "DRDY " : "",
                                  res->command & ATA_DF ? "DF " : "",
                                  res->command & ATA_DRQ ? "DRQ " : "",
@@ -2186,12 +2495,12 @@ static void ata_eh_link_report(struct ata_link *link)
                if (cmd->command != ATA_CMD_PACKET &&
                    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
                                     ATA_ABORTED)))
-                       ata_dev_printk(qc->dev, KERN_ERR,
-                         "error: { %s%s%s%s}\n",
+                       ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
                          res->feature & ATA_ICRC ? "ICRC " : "",
                          res->feature & ATA_UNC ? "UNC " : "",
                          res->feature & ATA_IDNF ? "IDNF " : "",
                          res->feature & ATA_ABORTED ? "ABRT " : "");
+#endif
        }
 }
 
@@ -2208,7 +2517,7 @@ void ata_eh_report(struct ata_port *ap)
 {
        struct ata_link *link;
 
-       __ata_port_for_each_link(link, ap)
+       ata_for_each_link(link, ap, HOST_FIRST)
                ata_eh_link_report(link);
 }
 
@@ -2219,14 +2528,13 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
        struct ata_device *dev;
 
        if (clear_classes)
-               ata_link_for_each_dev(dev, link)
+               ata_for_each_dev(dev, link, ALL)
                        classes[dev->devno] = ATA_DEV_UNKNOWN;
 
        return reset(link, classes, deadline);
 }
 
-static int ata_eh_followup_srst_needed(struct ata_link *link,
-                                      int rc, const unsigned int *classes)
+static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
 {
        if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
                return 0;
@@ -2244,7 +2552,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
        struct ata_port *ap = link->ap;
        struct ata_link *slave = ap->slave_link;
        struct ata_eh_context *ehc = &link->eh_context;
-       struct ata_eh_context *sehc = &slave->eh_context;
+       struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
        unsigned int *classes = ehc->classes;
        unsigned int lflags = link->flags;
        int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
@@ -2267,19 +2575,23 @@ int ata_eh_reset(struct ata_link *link, int classify,
        if (link->flags & ATA_LFLAG_NO_SRST)
                softreset = NULL;
 
-       now = jiffies;
-       deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
-       if (time_before(now, deadline))
-               schedule_timeout_uninterruptible(deadline - now);
+       /* make sure each reset attempt is at least COOL_DOWN apart */
+       if (ehc->i.flags & ATA_EHI_DID_RESET) {
+               now = jiffies;
+               WARN_ON(time_after(ehc->last_reset, now));
+               deadline = ata_deadline(ehc->last_reset,
+                                       ATA_EH_RESET_COOL_DOWN);
+               if (time_before(now, deadline))
+                       schedule_timeout_uninterruptible(deadline - now);
+       }
 
        spin_lock_irqsave(ap->lock, flags);
        ap->pflags |= ATA_PFLAG_RESETTING;
        spin_unlock_irqrestore(ap->lock, flags);
 
        ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
-       ehc->last_reset = jiffies;
 
-       ata_link_for_each_dev(dev, link) {
+       ata_for_each_dev(dev, link, ALL) {
                /* If we issue an SRST then an ATA drive (not ATAPI)
                 * may change configuration and be in PIO0 timing. If
                 * we do a hard reset (or are coming from power on)
@@ -2336,26 +2648,29 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
                if (rc) {
                        if (rc == -ENOENT) {
-                               ata_link_printk(link, KERN_DEBUG,
-                                               "port disabled. ignoring.\n");
+                               ata_link_dbg(link, "port disabled--ignoring\n");
                                ehc->i.action &= ~ATA_EH_RESET;
 
-                               ata_link_for_each_dev(dev, link)
+                               ata_for_each_dev(dev, link, ALL)
                                        classes[dev->devno] = ATA_DEV_NONE;
 
                                rc = 0;
                        } else
-                               ata_link_printk(link, KERN_ERR,
-                                       "prereset failed (errno=%d)\n", rc);
+                               ata_link_err(link,
+                                            "prereset failed (errno=%d)\n",
+                                            rc);
                        goto out;
                }
 
                /* prereset() might have cleared ATA_EH_RESET.  If so,
-                * bang classes and return.
+                * bang classes, thaw and return.
                 */
                if (reset && !(ehc->i.action & ATA_EH_RESET)) {
-                       ata_link_for_each_dev(dev, link)
+                       ata_for_each_dev(dev, link, ALL)
                                classes[dev->devno] = ATA_DEV_NONE;
+                       if ((ap->pflags & ATA_PFLAG_FROZEN) &&
+                           ata_is_host_link(link))
+                               ata_eh_thaw_port(ap);
                        rc = 0;
                        goto out;
                }
@@ -2365,7 +2680,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
        /*
         * Perform reset
         */
-       ehc->last_reset = jiffies;
        if (ata_is_host_link(link))
                ata_eh_freeze_port(ap);
 
@@ -2373,10 +2687,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
        if (reset) {
                if (verbose)
-                       ata_link_printk(link, KERN_INFO, "%s resetting link\n",
-                                       reset == softreset ? "soft" : "hard");
+                       ata_link_info(link, "%s resetting link\n",
+                                     reset == softreset ? "soft" : "hard");
 
                /* mark that this EH session started with reset */
+               ehc->last_reset = jiffies;
                if (reset == hardreset)
                        ehc->i.flags |= ATA_EHI_DID_HARDRESET;
                else
@@ -2393,8 +2708,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
                        int tmp;
 
                        if (verbose)
-                               ata_link_printk(slave, KERN_INFO,
-                                               "hard resetting link\n");
+                               ata_link_info(slave, "hard resetting link\n");
 
                        ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
                        tmp = ata_do_reset(slave, reset, classes, deadline,
@@ -2413,13 +2727,12 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
                /* perform follow-up SRST if necessary */
                if (reset == hardreset &&
-                   ata_eh_followup_srst_needed(link, rc, classes)) {
+                   ata_eh_followup_srst_needed(link, rc)) {
                        reset = softreset;
 
                        if (!reset) {
-                               ata_link_printk(link, KERN_ERR,
-                                               "follow-up softreset required "
-                                               "but no softreset avaliable\n");
+                               ata_link_err(link,
+            "follow-up softreset required but no softreset available\n");
                                failed_link = link;
                                rc = -EINVAL;
                                goto fail;
@@ -2427,11 +2740,15 @@ int ata_eh_reset(struct ata_link *link, int classify,
 
                        ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
                        rc = ata_do_reset(link, reset, classes, deadline, true);
+                       if (rc) {
+                               failed_link = link;
+                               goto fail;
+                       }
                }
        } else {
                if (verbose)
-                       ata_link_printk(link, KERN_INFO, "no reset method "
-                                       "available, skipping reset\n");
+                       ata_link_info(link,
+       "no reset method available, skipping reset\n");
                if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
                        lflags |= ATA_LFLAG_ASSUME_ATA;
        }
@@ -2439,7 +2756,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
        /*
         * Post-reset processing
         */
-       ata_link_for_each_dev(dev, link) {
+       ata_for_each_dev(dev, link, ALL) {
                /* After the reset, the device state is PIO 0 and the
                 * controller state is undefined.  Reset also wakes up
                 * drives from sleeping mode.
@@ -2454,7 +2771,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
                if (lflags & ATA_LFLAG_ASSUME_ATA)
                        classes[dev->devno] = ATA_DEV_ATA;
                else if (lflags & ATA_LFLAG_ASSUME_SEMB)
-                       classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
+                       classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
        }
 
        /* record current link speed */
@@ -2480,49 +2797,73 @@ int ata_eh_reset(struct ata_link *link, int classify,
                        postreset(slave, classes);
        }
 
-       /* clear cached SError */
+       /*
+        * Some controllers can't be frozen very well and may set spurious
+        * error conditions during reset.  Clear accumulated error
+        * information and re-thaw the port if frozen.  As reset is the
+        * final recovery action and we cross check link onlineness against
+        * device classification later, no hotplug event is lost by this.
+        */
        spin_lock_irqsave(link->ap->lock, flags);
-       link->eh_info.serror = 0;
+       memset(&link->eh_info, 0, sizeof(link->eh_info));
        if (slave)
-               slave->eh_info.serror = 0;
+               memset(&slave->eh_info, 0, sizeof(link->eh_info));
+       ap->pflags &= ~ATA_PFLAG_EH_PENDING;
        spin_unlock_irqrestore(link->ap->lock, flags);
 
-       /* Make sure onlineness and classification result correspond.
+       if (ap->pflags & ATA_PFLAG_FROZEN)
+               ata_eh_thaw_port(ap);
+
+       /*
+        * Make sure onlineness and classification result correspond.
         * Hotplug could have happened during reset and some
         * controllers fail to wait while a drive is spinning up after
         * being hotplugged causing misdetection.  By cross checking
-        * link onlineness and classification result, those conditions
-        * can be reliably detected and retried.
+        * link on/offlineness and classification result, those
+        * conditions can be reliably detected and retried.
         */
        nr_unknown = 0;
-       ata_link_for_each_dev(dev, link) {
-               /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
-               if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
-                       classes[dev->devno] = ATA_DEV_NONE;
-                       if (ata_phys_link_online(ata_dev_phys_link(dev)))
+       ata_for_each_dev(dev, link, ALL) {
+               if (ata_phys_link_online(ata_dev_phys_link(dev))) {
+                       if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
+                               ata_dev_dbg(dev, "link online but device misclassified\n");
+                               classes[dev->devno] = ATA_DEV_NONE;
                                nr_unknown++;
+                       }
+               } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+                       if (ata_class_enabled(classes[dev->devno]))
+                               ata_dev_dbg(dev,
+                                           "link offline, clearing class %d to NONE\n",
+                                           classes[dev->devno]);
+                       classes[dev->devno] = ATA_DEV_NONE;
+               } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
+                       ata_dev_dbg(dev,
+                                   "link status unknown, clearing UNKNOWN to NONE\n");
+                       classes[dev->devno] = ATA_DEV_NONE;
                }
        }
 
        if (classify && nr_unknown) {
                if (try < max_tries) {
-                       ata_link_printk(link, KERN_WARNING, "link online but "
-                                      "device misclassified, retrying\n");
+                       ata_link_warn(link,
+                                     "link online but %d devices misclassified, retrying\n",
+                                     nr_unknown);
                        failed_link = link;
                        rc = -EAGAIN;
                        goto fail;
                }
-               ata_link_printk(link, KERN_WARNING,
-                              "link online but device misclassified, "
-                              "device detection might fail\n");
+               ata_link_warn(link,
+                             "link online but %d devices misclassified, "
+                             "device detection might fail\n", nr_unknown);
        }
 
        /* reset successful, schedule revalidation */
        ata_eh_done(link, NULL, ATA_EH_RESET);
        if (slave)
                ata_eh_done(slave, NULL, ATA_EH_RESET);
-       ehc->last_reset = jiffies;
+       ehc->last_reset = jiffies;              /* update to completion time */
        ehc->i.action |= ATA_EH_REVALIDATE;
+       link->lpm_policy = ATA_LPM_UNKNOWN;     /* reset LPM state */
 
        rc = 0;
  out:
@@ -2543,27 +2884,48 @@ int ata_eh_reset(struct ata_link *link, int classify,
            sata_scr_read(link, SCR_STATUS, &sstatus))
                rc = -ERESTART;
 
-       if (rc == -ERESTART || try >= max_tries)
+       if (try >= max_tries) {
+               /*
+                * Thaw host port even if reset failed, so that the port
+                * can be retried on the next phy event.  This risks
+                * repeated EH runs but seems to be a better tradeoff than
+                * shutting down a port after a botched hotplug attempt.
+                */
+               if (ata_is_host_link(link))
+                       ata_eh_thaw_port(ap);
                goto out;
+       }
 
        now = jiffies;
        if (time_before(now, deadline)) {
                unsigned long delta = deadline - now;
 
-               ata_link_printk(failed_link, KERN_WARNING,
+               ata_link_warn(failed_link,
                        "reset failed (errno=%d), retrying in %u secs\n",
                        rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
 
+               ata_eh_release(ap);
                while (delta)
                        delta = schedule_timeout_uninterruptible(delta);
+               ata_eh_acquire(ap);
+       }
+
+       /*
+        * While disks spinup behind PMP, some controllers fail sending SRST.
+        * They need to be reset - as well as the PMP - before retrying.
+        */
+       if (rc == -ERESTART) {
+               if (ata_is_host_link(link))
+                       ata_eh_thaw_port(ap);
+               goto out;
        }
 
        if (try == max_tries - 1) {
-               sata_down_spd_limit(link);
+               sata_down_spd_limit(link, 0);
                if (slave)
-                       sata_down_spd_limit(slave);
+                       sata_down_spd_limit(slave, 0);
        } else if (rc == -EPIPE)
-               sata_down_spd_limit(failed_link);
+               sata_down_spd_limit(failed_link, 0);
 
        if (hardreset)
                reset = hardreset;
@@ -2604,8 +2966,8 @@ static inline void ata_eh_pull_park_action(struct ata_port *ap)
 
        spin_lock_irqsave(ap->lock, flags);
        INIT_COMPLETION(ap->park_req_pending);
-       ata_port_for_each_link(link, ap) {
-               ata_link_for_each_dev(dev, link) {
+       ata_for_each_link(link, ap, EDGE) {
+               ata_for_each_dev(dev, link, ALL) {
                        struct ata_eh_info *ehi = &link->eh_info;
 
                        link->eh_context.i.dev_action[dev->devno] |=
@@ -2639,7 +3001,7 @@ static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
        tf.protocol |= ATA_PROT_NODATA;
        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (park && (err_mask || tf.lbal != 0xc4)) {
-               ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+               ata_dev_err(dev, "head unload failed!\n");
                ehc->unloaded_mask &= ~(1 << dev->devno);
        }
 }
@@ -2660,7 +3022,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
         * be done backwards such that PDIAG- is released by the slave
         * device before the master device is identified.
         */
-       ata_link_for_each_dev_reverse(dev, link) {
+       ata_for_each_dev(dev, link, ALL_REVERSE) {
                unsigned int action = ata_eh_dev_action(dev);
                unsigned int readid_flags = 0;
 
@@ -2689,10 +3051,16 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
                        ehc->i.flags |= ATA_EHI_SETMODE;
 
                        /* schedule the scsi_rescan_device() here */
-                       queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
+                       schedule_work(&(ap->scsi_rescan_task));
                } else if (dev->class == ATA_DEV_UNKNOWN &&
                           ehc->tries[dev->devno] &&
                           ata_class_enabled(ehc->classes[dev->devno])) {
+                       /* Temporarily set dev->class; it will be
+                        * permanently set once all configurations are
+                        * complete.  This is necessary because new
+                        * device configuration is done in two
+                        * separate loops.
+                        */
                        dev->class = ehc->classes[dev->devno];
 
                        if (dev->class == ATA_DEV_PMP)
@@ -2700,20 +3068,25 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
                        else
                                rc = ata_dev_read_id(dev, &dev->class,
                                                     readid_flags, dev->id);
+
+                       /* read_id might have changed class, store and reset */
+                       ehc->classes[dev->devno] = dev->class;
+                       dev->class = ATA_DEV_UNKNOWN;
+
                        switch (rc) {
                        case 0:
+                               /* clear error info accumulated during probe */
+                               ata_ering_clear(&dev->ering);
                                new_mask |= 1 << dev->devno;
                                break;
                        case -ENOENT:
                                /* IDENTIFY was issued to non-existent
                                 * device.  No need to reset.  Just
-                                * thaw and kill the device.
+                                * thaw and ignore the device.
                                 */
                                ata_eh_thaw_port(ap);
-                               dev->class = ATA_DEV_UNKNOWN;
                                break;
                        default:
-                               dev->class = ATA_DEV_UNKNOWN;
                                goto err;
                        }
                }
@@ -2729,16 +3102,22 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
        /* Configure new devices forward such that user doesn't see
         * device detection messages backwards.
         */
-       ata_link_for_each_dev(dev, link) {
-               if (!(new_mask & (1 << dev->devno)) ||
-                   dev->class == ATA_DEV_PMP)
+       ata_for_each_dev(dev, link, ALL) {
+               if (!(new_mask & (1 << dev->devno)))
+                       continue;
+
+               dev->class = ehc->classes[dev->devno];
+
+               if (dev->class == ATA_DEV_PMP)
                        continue;
 
                ehc->i.flags |= ATA_EHI_PRINTINFO;
                rc = ata_dev_configure(dev);
                ehc->i.flags &= ~ATA_EHI_PRINTINFO;
-               if (rc)
+               if (rc) {
+                       dev->class = ATA_DEV_UNKNOWN;
                        goto err;
+               }
 
                spin_lock_irqsave(ap->lock, flags);
                ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
@@ -2759,7 +3138,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
 /**
  *     ata_set_mode - Program timings and issue SET FEATURES - XFER
  *     @link: link on which timings will be programmed
- *     @r_failed_dev: out paramter for failed device
+ *     @r_failed_dev: out parameter for failed device
  *
  *     Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
  *     ata_set_mode() fails, pointer to the failing device is
@@ -2778,7 +3157,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
        int rc;
 
        /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
-       ata_link_for_each_dev(dev, link) {
+       ata_for_each_dev(dev, link, ENABLED) {
                if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
                        struct ata_ering_entry *ent;
 
@@ -2795,7 +3174,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
                rc = ata_do_set_mode(link, r_failed_dev);
 
        /* if transfer mode has changed, set DUBIOUS_XFER on device */
-       ata_link_for_each_dev(dev, link) {
+       ata_for_each_dev(dev, link, ENABLED) {
                struct ata_eh_context *ehc = &link->eh_context;
                u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
                u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
@@ -2827,14 +3206,15 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
        int i;
 
        for (i = 0; i < ATA_EH_UA_TRIES; i++) {
-               u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+               u8 *sense_buffer = dev->link->ap->sector_buf;
                u8 sense_key = 0;
                unsigned int err_mask;
 
                err_mask = atapi_eh_tur(dev, &sense_key);
                if (err_mask != 0 && err_mask != AC_ERR_DEV) {
-                       ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
-                               "failed (err_mask=0x%x)\n", err_mask);
+                       ata_dev_warn(dev,
+                                    "TEST_UNIT_READY failed (err_mask=0x%x)\n",
+                                    err_mask);
                        return -EIO;
                }
 
@@ -2843,26 +3223,231 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
 
                err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
                if (err_mask) {
-                       ata_dev_printk(dev, KERN_WARNING, "failed to clear "
+                       ata_dev_warn(dev, "failed to clear "
                                "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
                        return -EIO;
                }
        }
 
-       ata_dev_printk(dev, KERN_WARNING,
-               "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+       ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
+                    ATA_EH_UA_TRIES);
+
+       return 0;
+}
+
+/**
+ *     ata_eh_maybe_retry_flush - Retry FLUSH if necessary
+ *     @dev: ATA device which may need FLUSH retry
+ *
+ *     If @dev failed FLUSH, the failure needs to be reported to the
+ *     upper layer immediately, as it means that @dev failed to remap
+ *     and has already lost at least a sector; further FLUSH retries
+ *     won't make any difference to the lost sector.  However, if
+ *     FLUSH failed for another reason, for example a transmission
+ *     error, FLUSH needs to be retried.
+ *
+ *     This function determines whether FLUSH failure retry is
+ *     necessary and performs it if so.
+ *
+ *     RETURNS:
+ *     0 if EH can continue, -errno if EH needs to be repeated.
+ */
+static int ata_eh_maybe_retry_flush(struct ata_device *dev)
+{
+       struct ata_link *link = dev->link;
+       struct ata_port *ap = link->ap;
+       struct ata_queued_cmd *qc;
+       struct ata_taskfile tf;
+       unsigned int err_mask;
+       int rc = 0;
+
+       /* did flush fail for this device? */
+       if (!ata_tag_valid(link->active_tag))
+               return 0;
+
+       qc = __ata_qc_from_tag(ap, link->active_tag);
+       if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
+                              qc->tf.command != ATA_CMD_FLUSH))
+               return 0;
+
+       /* if the device failed it, it should be reported to upper layers */
+       if (qc->err_mask & AC_ERR_DEV)
+               return 0;
+
+       /* flush failed for some other reason, give it another shot */
+       ata_tf_init(dev, &tf);
+
+       tf.command = qc->tf.command;
+       tf.flags |= ATA_TFLAG_DEVICE;
+       tf.protocol = ATA_PROT_NODATA;
+
+       ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
+                      tf.command, qc->err_mask);
+
+       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+       if (!err_mask) {
+               /*
+                * The FLUSH completed, but there's no way to
+                * successfully complete a failed command from EH.
+                * Making sure retry is allowed at least once and
+                * retrying the command should do the trick - whatever
+                * was in the cache is already on the platter, so this
+                * won't cause an infinite loop.
+                */
+               qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
+       } else {
+               ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
+                              err_mask);
+               rc = -EIO;
+
+               /* if device failed it, report it to upper layers */
+               if (err_mask & AC_ERR_DEV) {
+                       qc->err_mask |= AC_ERR_DEV;
+                       qc->result_tf = tf;
+                       if (!(ap->pflags & ATA_PFLAG_FROZEN))
+                               rc = 0;
+               }
+       }
+       return rc;
+}
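
A side note for readers of this hunk: the retry rule above reduces to one check on the error mask - a device-reported failure means a sector is already lost and must surface to the block layer, while any other failure is worth one more FLUSH. A minimal user-space sketch of that decision (illustrative only, not part of the patch; ERR_DEV/ERR_OTHER are stand-ins, not the libata AC_ERR_* flags):

	#include <stdbool.h>
	#include <stdio.h>

	#define ERR_DEV   0x01	/* device reported the failure */
	#define ERR_OTHER 0x02	/* timeout, transmission error, ... */

	/* Retry FLUSH only when the device itself did not report the failure. */
	static bool flush_worth_retrying(unsigned int err_mask)
	{
		return !(err_mask & ERR_DEV);
	}

	int main(void)
	{
		printf("device error -> retry? %d\n", flush_worth_retrying(ERR_DEV));
		printf("link error   -> retry? %d\n", flush_worth_retrying(ERR_OTHER));
		return 0;
	}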
+
+/**
+ *     ata_eh_set_lpm - configure SATA interface power management
+ *     @link: link to configure power management
+ *     @policy: the link power management policy
+ *     @r_failed_dev: out parameter for failed device
+ *
+ *     Enable SATA Interface power management.  This will enable
+ *     Device Initiated Power Management (DIPM) for the min_power
+ *     policy, and then call driver-specific callbacks to enable
+ *     Host Initiated Power Management (HIPM).
+ *
+ *     LOCKING:
+ *     EH context.
+ *
+ *     RETURNS:
+ *     0 on success, -errno on failure.
+ */
+static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+                         struct ata_device **r_failed_dev)
+{
+       struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+       struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
+       enum ata_lpm_policy old_policy = link->lpm_policy;
+       bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
+       unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
+       unsigned int err_mask;
+       int rc;
+
+       /* if the link or host doesn't do LPM, noop */
+       if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+               return 0;
+
+       /*
+        * DIPM is enabled only for MIN_POWER as some devices
+        * misbehave when the host NACKs the transition to SLUMBER.
+        * Order device and link configurations such that the host
+        * always allows DIPM requests.
+        */
+       ata_for_each_dev(dev, link, ENABLED) {
+               bool hipm = ata_id_has_hipm(dev->id);
+               bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
+
+               /* find the first enabled device and the first LPM-capable one */
+               if (!link_dev)
+                       link_dev = dev;
+
+               if (!lpm_dev && (hipm || dipm))
+                       lpm_dev = dev;
+
+               hints &= ~ATA_LPM_EMPTY;
+               if (!hipm)
+                       hints &= ~ATA_LPM_HIPM;
+
+               /* disable DIPM before changing link config */
+               if (policy != ATA_LPM_MIN_POWER && dipm) {
+                       err_mask = ata_dev_set_feature(dev,
+                                       SETFEATURES_SATA_DISABLE, SATA_DIPM);
+                       if (err_mask && err_mask != AC_ERR_DEV) {
+                               ata_dev_warn(dev,
+                                            "failed to disable DIPM, Emask 0x%x\n",
+                                            err_mask);
+                               rc = -EIO;
+                               goto fail;
+                       }
+               }
+       }
+
+       if (ap) {
+               rc = ap->ops->set_lpm(link, policy, hints);
+               if (!rc && ap->slave_link)
+                       rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
+       } else
+               rc = sata_pmp_set_lpm(link, policy, hints);
+
+       /*
+        * Attribute link config failure to the first (LPM) enabled
+        * device on the link.
+        */
+       if (rc) {
+               if (rc == -EOPNOTSUPP) {
+                       link->flags |= ATA_LFLAG_NO_LPM;
+                       return 0;
+               }
+               dev = lpm_dev ? lpm_dev : link_dev;
+               goto fail;
+       }
+
+       /*
+        * Low level driver acked the transition.  Issue DIPM command
+        * with the new policy set.
+        */
+       link->lpm_policy = policy;
+       if (ap && ap->slave_link)
+               ap->slave_link->lpm_policy = policy;
+
+       /* host config updated, enable DIPM if transitioning to MIN_POWER */
+       ata_for_each_dev(dev, link, ENABLED) {
+               if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
+                   ata_id_has_dipm(dev->id)) {
+                       err_mask = ata_dev_set_feature(dev,
+                                       SETFEATURES_SATA_ENABLE, SATA_DIPM);
+                       if (err_mask && err_mask != AC_ERR_DEV) {
+                               ata_dev_warn(dev,
+                                       "failed to enable DIPM, Emask 0x%x\n",
+                                       err_mask);
+                               rc = -EIO;
+                               goto fail;
+                       }
+               }
+       }
 
        return 0;
+
+fail:
+       /* restore the old policy */
+       link->lpm_policy = old_policy;
+       if (ap && ap->slave_link)
+               ap->slave_link->lpm_policy = old_policy;
+
+       /* if no device or only one more chance is left, disable LPM */
+       if (!dev || ehc->tries[dev->devno] <= 2) {
+               ata_link_warn(link, "disabling LPM on the link\n");
+               link->flags |= ATA_LFLAG_NO_LPM;
+       }
+       if (r_failed_dev)
+               *r_failed_dev = dev;
+       return rc;
 }
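
The ordering the comments above insist on can be summarized outside of libata: quiesce device-initiated transitions first, reprogram the host side of the link, and only then re-enable DIPM when the target policy is min_power. A hedged user-space sketch of that sequence (configure_lpm, set_device_dipm and set_host_link_lpm are hypothetical stubs, not kernel APIs):

	#include <stdbool.h>
	#include <stdio.h>

	enum lpm_policy { LPM_MAX_POWER, LPM_MEDIUM_POWER, LPM_MIN_POWER };

	/* hypothetical stubs standing in for SET FEATURES and ->set_lpm() */
	static int set_device_dipm(int devno, bool enable)
	{
		printf("dev%d: %s DIPM\n", devno, enable ? "enable" : "disable");
		return 0;
	}

	static int set_host_link_lpm(enum lpm_policy policy)
	{
		printf("host link: policy %d\n", policy);
		return 0;
	}

	static int configure_lpm(enum lpm_policy policy, int ndevs)
	{
		int i;

		/* 1. stop devices from requesting transitions the host may NACK */
		if (policy != LPM_MIN_POWER)
			for (i = 0; i < ndevs; i++)
				if (set_device_dipm(i, false))
					return -1;

		/* 2. reprogram the host side of the link */
		if (set_host_link_lpm(policy))
			return -1;

		/* 3. only now let devices initiate power transitions */
		if (policy == LPM_MIN_POWER)
			for (i = 0; i < ndevs; i++)
				if (set_device_dipm(i, true))
					return -1;
		return 0;
	}

	int main(void)
	{
		return configure_lpm(LPM_MIN_POWER, 2);
	}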
 
-static int ata_link_nr_enabled(struct ata_link *link)
+int ata_link_nr_enabled(struct ata_link *link)
 {
        struct ata_device *dev;
        int cnt = 0;
 
-       ata_link_for_each_dev(dev, link)
-               if (ata_dev_enabled(dev))
-                       cnt++;
+       ata_for_each_dev(dev, link, ENABLED)
+               cnt++;
        return cnt;
 }
 
@@ -2871,7 +3456,7 @@ static int ata_link_nr_vacant(struct ata_link *link)
        struct ata_device *dev;
        int cnt = 0;
 
-       ata_link_for_each_dev(dev, link)
+       ata_for_each_dev(dev, link, ALL)
                if (dev->class == ATA_DEV_UNKNOWN)
                        cnt++;
        return cnt;
@@ -2887,6 +3472,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
+       /* skip if explicitly requested */
+       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+               return 1;
+
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
@@ -2897,7 +3486,7 @@ static int ata_eh_skip_recovery(struct ata_link *link)
                return 0;
 
        /* skip if class codes for all vacant slots are ATA_DEV_NONE */
-       ata_link_for_each_dev(dev, link) {
+       ata_for_each_dev(dev, link, ALL) {
                if (dev->class == ATA_DEV_UNKNOWN &&
                    ehc->classes[dev->devno] != ATA_DEV_NONE)
                        return 0;
@@ -2906,9 +3495,24 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        return 1;
 }
 
+static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+       u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
+       u64 now = get_jiffies_64();
+       int *trials = void_arg;
+
+       if (ent->timestamp < now - min(now, interval))
+               return -1;
+
+       (*trials)++;
+       return 0;
+}
+
 static int ata_eh_schedule_probe(struct ata_device *dev)
 {
        struct ata_eh_context *ehc = &dev->link->eh_context;
+       struct ata_link *link = ata_dev_phys_link(dev);
+       int trials = 0;
 
        if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
            (ehc->did_probe_mask & (1 << dev->devno)))
@@ -2921,6 +3525,35 @@ static int ata_eh_schedule_probe(struct ata_device *dev)
        ehc->saved_xfer_mode[dev->devno] = 0;
        ehc->saved_ncq_enabled &= ~(1 << dev->devno);
 
+       /* the link may be in a deep sleep, wake it up */
+       if (link->lpm_policy > ATA_LPM_MAX_POWER) {
+               if (ata_is_host_link(link))
+                       link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
+                                              ATA_LPM_EMPTY);
+               else
+                       sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
+                                        ATA_LPM_EMPTY);
+       }
+
+       /* Record and count probe trials on the ering.  The specific
+        * error mask used is irrelevant.  Because a successful device
+        * detection clears the ering, this count accumulates only if
+        * there are consecutive failed probes.
+        *
+        * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
+        * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
+        * forced to 1.5Gbps.
+        *
+        * This is to work around cases where failed link speed
+        * negotiation results in device misdetection leading to
+        * infinite DEVXCHG or PHRDY CHG events.
+        */
+       ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
+       ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
+
+       if (trials > ATA_EH_PROBE_TRIALS)
+               sata_down_spd_limit(link, 1);
+
        return 1;
 }
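
The trial counting above is a generic rate-limit pattern: stamp every failed attempt, count how many stamps fall inside a sliding window, and back off once a threshold is crossed. A self-contained sketch of the same idea in user-space C (illustrative only; ring size, window and limit are made-up values, and wall-clock time stands in for jiffies):

	#include <stdio.h>
	#include <time.h>

	#define TRIAL_RING_SIZE  8	/* remember at most this many attempts */
	#define TRIAL_WINDOW_S	60	/* "recent" means within the last minute */
	#define TRIAL_LIMIT	 2	/* back off above this many recent trials */

	static time_t trial_ring[TRIAL_RING_SIZE];
	static unsigned int trial_head;

	static void record_trial(time_t now)
	{
		trial_ring[trial_head++ % TRIAL_RING_SIZE] = now;
	}

	static int count_recent_trials(time_t now)
	{
		int i, n = 0;

		for (i = 0; i < TRIAL_RING_SIZE; i++)
			if (trial_ring[i] && now - trial_ring[i] <= TRIAL_WINDOW_S)
				n++;
		return n;
	}

	int main(void)
	{
		time_t now = time(NULL);
		int i;

		/* three consecutive failed probes inside one window */
		for (i = 0; i < 3; i++) {
			record_trial(now);
			if (count_recent_trials(now) > TRIAL_LIMIT)
				printf("probe %d: too many recent trials, "
				       "drop link speed\n", i + 1);
			else
				printf("probe %d: retry at current speed\n", i + 1);
		}
		return 0;
	}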
 
@@ -2928,7 +3561,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
 {
        struct ata_eh_context *ehc = &dev->link->eh_context;
 
-       ehc->tries[dev->devno]--;
+       /* -EAGAIN from EH routine indicates retry without prejudice.
+        * The requester is responsible for ensuring forward progress.
+        */
+       if (err != -EAGAIN)
+               ehc->tries[dev->devno]--;
 
        switch (err) {
        case -ENODEV:
@@ -2938,12 +3575,13 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
                /* give it just one more chance */
                ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
        case -EIO:
-               if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) {
+               if (ehc->tries[dev->devno] == 1) {
                        /* This is the last chance, better to slow
                         * down than lose it.
                         */
-                       sata_down_spd_limit(ata_dev_phys_link(dev));
-                       ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+                       sata_down_spd_limit(ata_dev_phys_link(dev), 0);
+                       if (dev->pio_mode > XFER_PIO_0)
+                               ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
                }
        }
 
@@ -2998,14 +3636,13 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 {
        struct ata_link *link;
        struct ata_device *dev;
-       int nr_failed_devs;
-       int rc;
+       int rc, nr_fails;
        unsigned long flags, deadline;
 
        DPRINTK("ENTER\n");
 
        /* prep for recovery */
-       ata_port_for_each_link(link, ap) {
+       ata_for_each_link(link, ap, EDGE) {
                struct ata_eh_context *ehc = &link->eh_context;
 
                /* re-enable link? */
@@ -3017,7 +3654,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                        ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
                }
 
-               ata_link_for_each_dev(dev, link) {
+               ata_for_each_dev(dev, link, ALL) {
                        if (link->flags & ATA_LFLAG_NO_RETRY)
                                ehc->tries[dev->devno] = 1;
                        else
@@ -3040,26 +3677,25 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 
  retry:
        rc = 0;
-       nr_failed_devs = 0;
 
        /* if UNLOADING, finish immediately */
        if (ap->pflags & ATA_PFLAG_UNLOADING)
                goto out;
 
        /* prep for EH */
-       ata_port_for_each_link(link, ap) {
+       ata_for_each_link(link, ap, EDGE) {
                struct ata_eh_context *ehc = &link->eh_context;
 
                /* skip EH if possible. */
                if (ata_eh_skip_recovery(link))
                        ehc->i.action = 0;
 
-               ata_link_for_each_dev(dev, link)
+               ata_for_each_dev(dev, link, ALL)
                        ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
        }
 
        /* reset */
-       ata_port_for_each_link(link, ap) {
+       ata_for_each_link(link, ap, EDGE) {
                struct ata_eh_context *ehc = &link->eh_context;
 
                if (!(ehc->i.action & ATA_EH_RESET))
@@ -3068,8 +3704,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                rc = ata_eh_reset(link, ata_link_nr_vacant(link),
                                  prereset, softreset, hardreset, postreset);
                if (rc) {
-                       ata_link_printk(link, KERN_ERR,
-                                       "reset failed, giving up\n");
+                       ata_link_err(link, "reset failed, giving up\n");
                        goto out;
                }
        }
@@ -3084,8 +3719,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                ata_eh_pull_park_action(ap);
 
                deadline = jiffies;
-               ata_port_for_each_link(link, ap) {
-                       ata_link_for_each_dev(dev, link) {
+               ata_for_each_link(link, ap, EDGE) {
+                       ata_for_each_dev(dev, link, ALL) {
                                struct ata_eh_context *ehc = &link->eh_context;
                                unsigned long tmp;
 
@@ -3110,11 +3745,13 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                if (time_before_eq(deadline, now))
                        break;
 
+               ata_eh_release(ap);
                deadline = wait_for_completion_timeout(&ap->park_req_pending,
                                                       deadline - now);
+               ata_eh_acquire(ap);
        } while (deadline);
-       ata_port_for_each_link(link, ap) {
-               ata_link_for_each_dev(dev, link) {
+       ata_for_each_link(link, ap, EDGE) {
+               ata_for_each_dev(dev, link, ALL) {
                        if (!(link->eh_context.unloaded_mask &
                              (1 << dev->devno)))
                                continue;
@@ -3125,13 +3762,17 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
        }
 
        /* the rest */
-       ata_port_for_each_link(link, ap) {
+       nr_fails = 0;
+       ata_for_each_link(link, ap, PMP_FIRST) {
                struct ata_eh_context *ehc = &link->eh_context;
 
+               if (sata_pmp_attached(ap) && ata_is_host_link(link))
+                       goto config_lpm;
+
                /* revalidate existing devices and attach new ones */
                rc = ata_eh_revalidate_and_attach(link, &dev);
                if (rc)
-                       goto dev_fail;
+                       goto rest_fail;
 
                /* if PMP got attached, return, pmp EH will take care of it */
                if (link->device->class == ATA_DEV_PMP) {
@@ -3143,7 +3784,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                if (ehc->i.flags & ATA_EHI_SETMODE) {
                        rc = ata_set_mode(link, &dev);
                        if (rc)
-                               goto dev_fail;
+                               goto rest_fail;
                        ehc->i.flags &= ~ATA_EHI_SETMODE;
                }
 
@@ -3151,27 +3792,40 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                 * disrupting the current users of the device.
                 */
                if (ehc->i.flags & ATA_EHI_DID_RESET) {
-                       ata_link_for_each_dev(dev, link) {
+                       ata_for_each_dev(dev, link, ALL) {
                                if (dev->class != ATA_DEV_ATAPI)
                                        continue;
                                rc = atapi_eh_clear_ua(dev);
                                if (rc)
-                                       goto dev_fail;
+                                       goto rest_fail;
                        }
                }
 
+               /* retry flush if necessary */
+               ata_for_each_dev(dev, link, ALL) {
+                       if (dev->class != ATA_DEV_ATA)
+                               continue;
+                       rc = ata_eh_maybe_retry_flush(dev);
+                       if (rc)
+                               goto rest_fail;
+               }
+
+       config_lpm:
                /* configure link power saving */
-               if (ehc->i.action & ATA_EH_LPM)
-                       ata_link_for_each_dev(dev, link)
-                               ata_dev_enable_pm(dev, ap->pm_policy);
+               if (link->lpm_policy != ap->target_lpm_policy) {
+                       rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
+                       if (rc)
+                               goto rest_fail;
+               }
 
                /* this link is okay now */
                ehc->i.flags = 0;
                continue;
 
-dev_fail:
-               nr_failed_devs++;
-               ata_eh_handle_dev_fail(dev, rc);
+       rest_fail:
+               nr_fails++;
+               if (dev)
+                       ata_eh_handle_dev_fail(dev, rc);
 
                if (ap->pflags & ATA_PFLAG_FROZEN) {
                        /* PMP reset requires working host port.
@@ -3183,7 +3837,7 @@ dev_fail:
                }
        }
 
-       if (nr_failed_devs)
+       if (nr_fails)
                goto retry;
 
  out:
@@ -3267,7 +3921,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
        rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
                            NULL);
        if (rc) {
-               ata_link_for_each_dev(dev, &ap->link)
+               ata_for_each_dev(dev, &ap->link, ALL)
                        ata_dev_disable(dev);
        }
 
@@ -3289,7 +3943,7 @@ void ata_std_error_handler(struct ata_port *ap)
        ata_reset_fn_t hardreset = ops->hardreset;
 
        /* ignore built-in hardreset if SCR access is not available */
-       if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+       if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
                hardreset = NULL;
 
        ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
@@ -3364,6 +4018,8 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
  */
 static void ata_eh_handle_port_resume(struct ata_port *ap)
 {
+       struct ata_link *link;
+       struct ata_device *dev;
        unsigned long flags;
        int rc = 0;
 
@@ -3378,6 +4034,17 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 
        WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
 
+       /*
+        * Error timestamps are in jiffies, which doesn't advance while
+        * suspended, and PHY events during resume aren't too uncommon.
+        * When the two are combined, they can lead to unnecessary speed
+        * downs if the machine is suspended and resumed repeatedly.
+        * Clear the error history.
+        */
+       ata_for_each_link(link, ap, HOST_FIRST)
+               ata_for_each_dev(dev, link, ALL)
+                       ata_ering_clear(&dev->ering);
+
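
The reason for clearing the ering here can be seen with a toy model: a counter that does not advance across suspend makes pre-suspend errors look recent after resume, so a window-based count overstates them unless the history is wiped. A minimal user-space sketch (illustrative only; names and the 60-tick window are made up):

	#include <stdio.h>

	#define RECENT_WINDOW 60	/* "recent" means within the last 60 ticks */

	/* jiffies-like counter: it does NOT advance while suspended */
	static unsigned long fake_jiffies = 1000;

	static int is_recent(unsigned long now, unsigned long stamp)
	{
		return stamp && now - stamp <= RECENT_WINDOW;
	}

	int main(void)
	{
		unsigned long err_stamp = fake_jiffies;	/* error logged, then suspend */

		/* hours later the machine resumes, but the counter never moved,
		 * so the stale error still counts as recent ... */
		printf("after resume, old error recent? %d\n",
		       is_recent(fake_jiffies, err_stamp));

		/* ... unless the history is cleared on resume */
		err_stamp = 0;
		printf("after clearing, old error recent? %d\n",
		       is_recent(fake_jiffies, err_stamp));
		return 0;
	}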
        ata_acpi_set_state(ap, PMSG_ON);
 
        if (ap->ops->port_resume)