diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index cc5c557..c524d36 100644
 #include "registers.h"
 #include "hw.h"
 
-static int ioat_pending_level = 4;
+int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");
 
-static void ioat_dma_chan_reset_part2(struct work_struct *work);
-static void ioat_dma_chan_watchdog(struct work_struct *work);
-
 /* internal functions */
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-
-static inline struct ioat_dma_chan *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
-{
-       return device->idx[index];
-}
+static void ioat1_cleanup(struct ioat_dma_chan *ioat);
+static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
 
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
@@ -69,7 +55,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index)
 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 {
        struct ioatdma_device *instance = data;
-       struct ioat_dma_chan *ioat_chan;
+       struct ioat_chan_common *chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;
@@ -86,8 +72,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 
        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
-               ioat_chan = ioat_chan_by_index(instance, bit);
-               tasklet_schedule(&ioat_chan->cleanup_task);
+               chan = ioat_chan_by_index(instance, bit);
+               tasklet_schedule(&chan->cleanup_task);
        }
 
        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -101,63 +87,85 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
  */
 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 {
-       struct ioat_dma_chan *ioat_chan = data;
+       struct ioat_chan_common *chan = data;
 
-       tasklet_schedule(&ioat_chan->cleanup_task);
+       tasklet_schedule(&chan->cleanup_task);
 
        return IRQ_HANDLED;
 }
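
Both interrupt paths converge on tasklet_schedule(); they differ only in how the channel is located. Single-vector mode reads ATTNSTATUS and walks the set bits, while MSI-X hands the channel in directly as the handler's data. A minimal user-space sketch of the single-vector dispatch (NCHAN, scheduled[] and dispatch() are illustrative stand-ins, not driver symbols):

    #include <stdio.h>

    #define NCHAN 8

    static int scheduled[NCHAN];            /* one "tasklet" flag per channel */

    /* walk the set bits of an attention-status word, as the single-vector
     * handler does with for_each_bit() */
    static void dispatch(unsigned long attnstatus)
    {
        for (int bit = 0; bit < NCHAN; bit++)
            if (attnstatus & (1UL << bit))
                scheduled[bit] = 1;         /* tasklet_schedule() stand-in */
    }

    int main(void)
    {
        dispatch(0x05);                     /* channels 0 and 2 interrupted */
        for (int i = 0; i < NCHAN; i++)
            printf("chan%d: %s\n", i, scheduled[i] ? "scheduled" : "idle");
        return 0;
    }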
 
-static void ioat_dma_cleanup_tasklet(unsigned long data);
+static void ioat1_cleanup_tasklet(unsigned long data);
+
+/* common channel initialization */
+void ioat_init_channel(struct ioatdma_device *device,
+                      struct ioat_chan_common *chan, int idx,
+                      void (*timer_fn)(unsigned long),
+                      void (*tasklet)(unsigned long),
+                      unsigned long ioat)
+{
+       struct dma_device *dma = &device->common;
+
+       chan->device = device;
+       chan->reg_base = device->reg_base + (0x80 * (idx + 1));
+       spin_lock_init(&chan->cleanup_lock);
+       chan->common.device = dma;
+       list_add_tail(&chan->common.device_node, &dma->channels);
+       device->idx[idx] = chan;
+       init_timer(&chan->timer);
+       chan->timer.function = timer_fn;
+       chan->timer.data = ioat;
+       tasklet_init(&chan->cleanup_task, tasklet, ioat);
+       tasklet_disable(&chan->cleanup_task);
+}
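
ioat_init_channel() touches only the embedded struct ioat_chan_common, so version-specific code can later recover its outer wrapper from the common part. A runnable sketch of that embed-and-recover pattern, using simplified hypothetical types (chan_common, dma_chan_v1) in place of the driver's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* simplified stand-ins for ioat_chan_common / ioat_dma_chan */
    struct chan_common { int idx; };
    struct dma_chan_v1 { struct chan_common base; int xfercap; };

    int main(void)
    {
        struct dma_chan_v1 ioat = { .base = { .idx = 3 }, .xfercap = 4096 };
        struct chan_common *chan = &ioat.base;

        /* code that only knows the common part can get back to the
         * version-specific wrapper, as the split driver does */
        struct dma_chan_v1 *outer = container_of(chan, struct dma_chan_v1, base);
        printf("chan %d, xfercap %d\n", outer->base.idx, outer->xfercap);
        return 0;
    }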
+
+static void ioat1_timer_event(unsigned long data);
 
 /**
- * ioat_dma_enumerate_channels - find and initialize the device's channels
+ * ioat1_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
  */
-static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
+static int ioat1_enumerate_channels(struct ioatdma_device *device)
 {
        u8 xfercap_scale;
        u32 xfercap;
        int i;
-       struct ioat_dma_chan *ioat_chan;
+       struct ioat_dma_chan *ioat;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;
 
        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+       dma->chancnt &= 0x1f; /* bits [4:0] valid */
+       if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+               dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+                        dma->chancnt, ARRAY_SIZE(device->idx));
+               dma->chancnt = ARRAY_SIZE(device->idx);
+       }
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+       xfercap_scale &= 0x1f; /* bits [4:0] valid */
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+       dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
 
 #ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
 #endif
        for (i = 0; i < dma->chancnt; i++) {
-               ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
-               if (!ioat_chan) {
-                       dma->chancnt = i;
+               ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
+               if (!ioat)
                        break;
-               }
 
-               ioat_chan->device = device;
-               ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
-               ioat_chan->xfercap = xfercap;
-               ioat_chan->desccount = 0;
-               INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-               spin_lock_init(&ioat_chan->cleanup_lock);
-               spin_lock_init(&ioat_chan->desc_lock);
-               INIT_LIST_HEAD(&ioat_chan->free_desc);
-               INIT_LIST_HEAD(&ioat_chan->used_desc);
-               /* This should be made common somewhere in dmaengine.c */
-               ioat_chan->common.device = &device->common;
-               list_add_tail(&ioat_chan->common.device_node, &dma->channels);
-               device->idx[i] = ioat_chan;
-               tasklet_init(&ioat_chan->cleanup_task,
-                            ioat_dma_cleanup_tasklet,
-                            (unsigned long) ioat_chan);
-               tasklet_disable(&ioat_chan->cleanup_task);
+               ioat_init_channel(device, &ioat->base, i,
+                                 ioat1_timer_event,
+                                 ioat1_cleanup_tasklet,
+                                 (unsigned long) ioat);
+               ioat->xfercap = xfercap;
+               spin_lock_init(&ioat->desc_lock);
+               INIT_LIST_HEAD(&ioat->free_desc);
+               INIT_LIST_HEAD(&ioat->used_desc);
        }
-       return dma->chancnt;
+       dma->chancnt = i;
+       return i;
 }
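
The XFERCAP decode above treats a scale of zero as "no per-descriptor limit" and otherwise caps each hardware descriptor at 2^scale bytes. A standalone model of that decode, assuming only the 5-bit-field behavior shown in the hunk:

    #include <stdio.h>

    /* decode XFERCAP: 0 means unlimited, otherwise the per-descriptor
     * transfer cap is 2^scale bytes (only bits [4:0] are valid) */
    static unsigned int decode_xfercap(unsigned char raw)
    {
        unsigned char scale = raw & 0x1f;
        return scale == 0 ? (unsigned int)-1 : (1U << scale);
    }

    int main(void)
    {
        printf("scale 0  -> %#x\n", decode_xfercap(0));   /* unlimited */
        printf("scale 12 -> %u\n", decode_xfercap(12));   /* 4096 bytes */
        return 0;
    }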
 
 /**
@@ -166,126 +174,45 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
  * @chan: DMA channel handle
  */
 static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
+__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
 {
-       ioat_chan->pending = 0;
-       writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
+       void __iomem *reg_base = ioat->base.reg_base;
 
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-       if (ioat_chan->pending > 0) {
-               spin_lock_bh(&ioat_chan->desc_lock);
-               __ioat1_dma_memcpy_issue_pending(ioat_chan);
-               spin_unlock_bh(&ioat_chan->desc_lock);
-       }
+       dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
+               __func__, ioat->pending);
+       ioat->pending = 0;
+       writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
 }
 
-static inline void
-__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
-{
-       ioat_chan->pending = 0;
-       writew(ioat_chan->dmacount,
-              ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_dma_chan *ioat = to_ioat_chan(chan);
 
-       if (ioat_chan->pending > 0) {
-               spin_lock_bh(&ioat_chan->desc_lock);
-               __ioat2_dma_memcpy_issue_pending(ioat_chan);
-               spin_unlock_bh(&ioat_chan->desc_lock);
+       if (ioat->pending > 0) {
+               spin_lock_bh(&ioat->desc_lock);
+               __ioat1_dma_memcpy_issue_pending(ioat);
+               spin_unlock_bh(&ioat->desc_lock);
        }
 }
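
Doorbell writes are batched: tx_submit() only issues IOAT_CHANCMD_APPEND once ioat->pending reaches the high-water mark, and issue_pending() flushes any remainder on demand. A small user-space model of that policy (flush() and submit() are illustrative names, not driver functions):

    #include <stdio.h>

    static int pending_level = 4;  /* mirrors the ioat_pending_level param */
    static int pending;
    static int doorbell_writes;

    /* models __ioat1_dma_memcpy_issue_pending(): reset and ring doorbell */
    static void flush(void)
    {
        pending = 0;
        doorbell_writes++;         /* stands in for the APPEND register write */
    }

    /* models the tail of tx_submit(): only flush at the high-water mark */
    static void submit(int descs)
    {
        pending += descs;
        if (pending >= pending_level)
            flush();
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            submit(1);
        if (pending > 0)           /* models issue_pending() */
            flush();
        printf("10 submits -> %d doorbell writes\n", doorbell_writes);
        return 0;
    }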
 
-
 /**
- * ioat_dma_chan_reset_part2 - reinit the channel after a reset
+ * ioat1_reset_channel - restart a channel
+ * @ioat: IOAT DMA channel handle
  */
-static void ioat_dma_chan_reset_part2(struct work_struct *work)
-{
-       struct ioat_dma_chan *ioat_chan =
-               container_of(work, struct ioat_dma_chan, work.work);
-       struct ioat_desc_sw *desc;
-
-       spin_lock_bh(&ioat_chan->cleanup_lock);
-       spin_lock_bh(&ioat_chan->desc_lock);
-
-       ioat_chan->completion_virt->low = 0;
-       ioat_chan->completion_virt->high = 0;
-       ioat_chan->pending = 0;
-
-       /*
-        * count the descriptors waiting, and be sure to do it
-        * right for both the CB1 line and the CB2 ring
-        */
-       ioat_chan->dmacount = 0;
-       if (ioat_chan->used_desc.prev) {
-               desc = to_ioat_desc(ioat_chan->used_desc.prev);
-               do {
-                       ioat_chan->dmacount++;
-                       desc = to_ioat_desc(desc->node.next);
-               } while (&desc->node != ioat_chan->used_desc.next);
-       }
-
-       /*
-        * write the new starting descriptor address
-        * this puts channel engine into ARMED state
-        */
-       desc = to_ioat_desc(ioat_chan->used_desc.prev);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->txd.phys) >> 32,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-               break;
-       case IOAT_VER_2_0:
-               writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->txd.phys) >> 32,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
-               /* tell the engine to go with what's left to be done */
-               writew(ioat_chan->dmacount,
-                      ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-
-               break;
-       }
-       dev_err(to_dev(ioat_chan),
-               "chan%d reset - %d descs waiting, %d total desc\n",
-               chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-
-       spin_unlock_bh(&ioat_chan->desc_lock);
-       spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
-
-/**
- * ioat_dma_reset_channel - restart a channel
- * @ioat_chan: IOAT DMA channel handle
- */
-static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
 {
+       struct ioat_chan_common *chan = &ioat->base;
+       void __iomem *reg_base = chan->reg_base;
        u32 chansts, chanerr;
 
-       if (!ioat_chan->used_desc.prev)
-               return;
-
-       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-       chansts = (ioat_chan->completion_virt->low
-                                       & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
+       dev_warn(to_dev(chan), "reset\n");
+       chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
+       chansts = *chan->completion & IOAT_CHANSTS_STATUS;
        if (chanerr) {
-               dev_err(to_dev(ioat_chan),
+               dev_err(to_dev(chan),
                        "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-                       chan_num(ioat_chan), chansts, chanerr);
-               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+                       chan_num(chan), chansts, chanerr);
+               writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
        }
 
        /*
@@ -296,260 +223,69 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
         * while we're waiting.
         */
 
-       spin_lock_bh(&ioat_chan->desc_lock);
-       ioat_chan->pending = INT_MIN;
+       ioat->pending = INT_MIN;
        writeb(IOAT_CHANCMD_RESET,
-              ioat_chan->reg_base
-              + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       /* schedule the 2nd half instead of sleeping a long time */
-       schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
-}
-
-/**
- * ioat_dma_chan_watchdog - watch for stuck channels
- */
-static void ioat_dma_chan_watchdog(struct work_struct *work)
-{
-       struct ioatdma_device *device =
-               container_of(work, struct ioatdma_device, work.work);
-       struct ioat_dma_chan *ioat_chan;
-       int i;
-
-       union {
-               u64 full;
-               struct {
-                       u32 low;
-                       u32 high;
-               };
-       } completion_hw;
-       unsigned long compl_desc_addr_hw;
-
-       for (i = 0; i < device->common.chancnt; i++) {
-               ioat_chan = ioat_chan_by_index(device, i);
-
-               if (ioat_chan->device->version == IOAT_VER_1_2
-                       /* have we started processing anything yet */
-                   && ioat_chan->last_completion
-                       /* have we completed any since last watchdog cycle? */
-                   && (ioat_chan->last_completion ==
-                               ioat_chan->watchdog_completion)
-                       /* has TCP stuck on one cookie since last watchdog? */
-                   && (ioat_chan->watchdog_tcp_cookie ==
-                               ioat_chan->watchdog_last_tcp_cookie)
-                   && (ioat_chan->watchdog_tcp_cookie !=
-                               ioat_chan->completed_cookie)
-                       /* is there something in the chain to be processed? */
-                       /* CB1 chain always has at least the last one processed */
-                   && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
-                   && ioat_chan->pending == 0) {
-
-                       /*
-                        * check CHANSTS register for completed
-                        * descriptor address.
-                        * if it is different than completion writeback,
-                        * it is not zero
-                        * and it has changed since the last watchdog
-                        *     we can assume that channel
-                        *     is still working correctly
-                        *     and the problem is in completion writeback.
-                        *     update completion writeback
-                        *     with actual CHANSTS value
-                        * else
-                        *     try resetting the channel
-                        */
-
-                       completion_hw.low = readl(ioat_chan->reg_base +
-                               IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
-                       completion_hw.high = readl(ioat_chan->reg_base +
-                               IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
-#if (BITS_PER_LONG == 64)
-                       compl_desc_addr_hw =
-                               completion_hw.full
-                               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
-                       compl_desc_addr_hw =
-                               completion_hw.low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
-                       if ((compl_desc_addr_hw != 0)
-                          && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
-                          && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
-                               ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
-                               ioat_chan->completion_virt->low = completion_hw.low;
-                               ioat_chan->completion_virt->high = completion_hw.high;
-                       } else {
-                               ioat_dma_reset_channel(ioat_chan);
-                               ioat_chan->watchdog_completion = 0;
-                               ioat_chan->last_compl_desc_addr_hw = 0;
-                       }
-
-               /*
-                * for version 2.0 if there are descriptors yet to be processed
-                * and the last completed hasn't changed since the last watchdog
-                *      if they haven't hit the pending level
-                *          issue the pending to push them through
-                *      else
-                *          try resetting the channel
-                */
-               } else if (ioat_chan->device->version == IOAT_VER_2_0
-                   && ioat_chan->used_desc.prev
-                   && ioat_chan->last_completion
-                   && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
-
-                       if (ioat_chan->pending < ioat_pending_level)
-                               ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
-                       else {
-                               ioat_dma_reset_channel(ioat_chan);
-                               ioat_chan->watchdog_completion = 0;
-                       }
-               } else {
-                       ioat_chan->last_compl_desc_addr_hw = 0;
-                       ioat_chan->watchdog_completion
-                                       = ioat_chan->last_completion;
-               }
-
-               ioat_chan->watchdog_last_tcp_cookie =
-                       ioat_chan->watchdog_tcp_cookie;
-       }
-
-       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+              reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+       set_bit(IOAT_RESET_PENDING, &chan->state);
+       mod_timer(&chan->timer, jiffies + RESET_DELAY);
 }
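
Rather than sleeping through the hardware reset, the channel sets IOAT_RESET_PENDING and lets the timer restart the chain after RESET_DELAY. A toy single-threaded model of that handshake; the bit helpers and printfs are stand-ins for the real set_bit()/test_and_clear_bit() and mod_timer() machinery:

    #include <stdio.h>

    enum { RESET_PENDING, COMPLETION_PENDING };

    static unsigned long state;

    static int test_and_clear(int bit, unsigned long *s)
    {
        int was = !!(*s & (1UL << bit));
        *s &= ~(1UL << bit);
        return was;
    }

    /* reset path: issue the reset, arm the flag, return immediately */
    static void reset_channel(void)
    {
        state |= 1UL << RESET_PENDING;  /* timer finishes the restart */
    }

    /* timer handler: finish the restart only if a reset was armed */
    static void timer_event(void)
    {
        if (test_and_clear(RESET_PENDING, &state))
            printf("restarting chain from last descriptor\n");
        else
            printf("no reset pending, checking completions\n");
    }

    int main(void)
    {
        reset_channel();
        timer_event();   /* fires after RESET_DELAY */
        timer_event();   /* subsequent timer tick */
        return 0;
    }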
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+       struct dma_chan *c = tx->chan;
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
+       struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *first;
        struct ioat_desc_sw *chain_tail;
        dma_cookie_t cookie;
 
-       spin_lock_bh(&ioat_chan->desc_lock);
+       spin_lock_bh(&ioat->desc_lock);
        /* cookie incr and addition to used_list must be atomic */
-       cookie = ioat_chan->common.cookie;
+       cookie = c->cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
-       ioat_chan->common.cookie = tx->cookie = cookie;
+       c->cookie = cookie;
+       tx->cookie = cookie;
+       dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
        /* write address into NextDescriptor field of last desc in chain */
-       first = to_ioat_desc(tx->tx_list.next);
-       chain_tail = to_ioat_desc(ioat_chan->used_desc.prev);
+       first = to_ioat_desc(desc->tx_list.next);
+       chain_tail = to_ioat_desc(ioat->used_desc.prev);
        /* make descriptor updates globally visible before chaining */
        wmb();
        chain_tail->hw->next = first->txd.phys;
-       list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc);
+       list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
+       dump_desc_dbg(ioat, chain_tail);
+       dump_desc_dbg(ioat, first);
 
-       ioat_chan->dmacount += desc->tx_cnt;
-       ioat_chan->pending += desc->tx_cnt;
-       if (ioat_chan->pending >= ioat_pending_level)
-               __ioat1_dma_memcpy_issue_pending(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+               mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 
-       return cookie;
-}
-
-static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
-       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
-       struct ioat_desc_sw *new;
-       struct ioat_dma_descriptor *hw;
-       dma_cookie_t cookie;
-       u32 copy;
-       size_t len;
-       dma_addr_t src, dst;
-       unsigned long orig_flags;
-       unsigned int desc_count = 0;
-
-       /* src and dest and len are stored in the initial descriptor */
-       len = first->len;
-       src = first->src;
-       dst = first->dst;
-       orig_flags = first->txd.flags;
-       new = first;
-
-       /*
-        * ioat_chan->desc_lock is still in force in version 2 path
-        * it gets unlocked at end of this function
-        */
-       do {
-               copy = min_t(size_t, len, ioat_chan->xfercap);
-
-               async_tx_ack(&new->txd);
-
-               hw = new->hw;
-               hw->size = copy;
-               hw->ctl = 0;
-               hw->src_addr = src;
-               hw->dst_addr = dst;
-
-               len -= copy;
-               dst += copy;
-               src += copy;
-               desc_count++;
-       } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
-
-       if (!new) {
-               dev_err(to_dev(ioat_chan), "tx submit failed\n");
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               return -ENOMEM;
-       }
-
-       hw->ctl_f.compl_write = 1;
-       if (first->txd.callback) {
-               hw->ctl_f.int_en = 1;
-               if (first != new) {
-                       /* move callback into to last desc */
-                       new->txd.callback = first->txd.callback;
-                       new->txd.callback_param
-                                       = first->txd.callback_param;
-                       first->txd.callback = NULL;
-                       first->txd.callback_param = NULL;
-               }
-       }
-
-       new->tx_cnt = desc_count;
-       new->txd.flags = orig_flags; /* client is in control of this ack */
-
-       /* store the original values for use in later cleanup */
-       if (new != first) {
-               new->src = first->src;
-               new->dst = first->dst;
-               new->len = first->len;
-       }
-
-       /* cookie incr and addition to used_list must be atomic */
-       cookie = ioat_chan->common.cookie;
-       cookie++;
-       if (cookie < 0)
-               cookie = 1;
-       ioat_chan->common.cookie = new->txd.cookie = cookie;
-
-       ioat_chan->dmacount += desc_count;
-       ioat_chan->pending += desc_count;
-       if (ioat_chan->pending >= ioat_pending_level)
-               __ioat2_dma_memcpy_issue_pending(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       ioat->active += desc->hw->tx_cnt;
+       ioat->pending += desc->hw->tx_cnt;
+       if (ioat->pending >= ioat_pending_level)
+               __ioat1_dma_memcpy_issue_pending(ioat);
+       spin_unlock_bh(&ioat->desc_lock);
 
        return cookie;
 }
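
Cookie assignment skips zero and negative values so a valid cookie is always positive. A sketch of that rule; the unsigned addition is only there to keep this model free of signed-overflow UB, where the driver relies on plain int arithmetic:

    #include <stdio.h>

    typedef int dma_cookie_t;

    /* monotonically increasing, never zero or negative */
    static dma_cookie_t next_cookie(dma_cookie_t last)
    {
        dma_cookie_t c = (dma_cookie_t)((unsigned int)last + 1u);

        if (c < 0)
            c = 1;
        return c;
    }

    int main(void)
    {
        printf("%d -> %d\n", 41, next_cookie(41));                  /* 42 */
        printf("%d -> %d\n", 0x7fffffff, next_cookie(0x7fffffff));  /* 1  */
        return 0;
    }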
 
 /**
  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @ioat: the channel supplying the memory pool for the descriptors
  * @flags: allocation flags
  */
 static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
+ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
 {
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;
 
-       ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
+       ioatdma_device = ioat->base.device;
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;
@@ -561,19 +297,13 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
        }
 
        memset(desc, 0, sizeof(*desc));
-       dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               desc_sw->txd.tx_submit = ioat1_tx_submit;
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               desc_sw->txd.tx_submit = ioat2_tx_submit;
-               break;
-       }
 
+       INIT_LIST_HEAD(&desc_sw->tx_list);
+       dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
+       desc_sw->txd.tx_submit = ioat1_tx_submit;
        desc_sw->hw = desc;
        desc_sw->txd.phys = phys;
+       set_desc_id(desc_sw, -1);
 
        return desc_sw;
 }
@@ -581,296 +311,164 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
 static int ioat_initial_desc_count = 256;
 module_param(ioat_initial_desc_count, int, 0644);
 MODULE_PARM_DESC(ioat_initial_desc_count,
-                "initial descriptors per channel (default: 256)");
-
-/**
- * ioat2_dma_massage_chan_desc - link the descriptors into a circle
- * @ioat_chan: the channel to be massaged
- */
-static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
-{
-       struct ioat_desc_sw *desc, *_desc;
-
-       /* setup used_desc */
-       ioat_chan->used_desc.next = ioat_chan->free_desc.next;
-       ioat_chan->used_desc.prev = NULL;
-
-       /* pull free_desc out of the circle so that every node is a hw
-        * descriptor, but leave it pointing to the list
-        */
-       ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
-       ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
-
-       /* circle link the hw descriptors */
-       desc = to_ioat_desc(ioat_chan->free_desc.next);
-       desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
-       list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
-               desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
-       }
-}
-
+                "ioat1: initial descriptors per channel (default: 256)");
 /**
- * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
+static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
+       struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *desc;
-       u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);
 
        /* have we already been set up? */
-       if (!list_empty(&ioat_chan->free_desc))
-               return ioat_chan->desccount;
+       if (!list_empty(&ioat->free_desc))
+               return ioat->desccount;
 
        /* Setup register to interrupt and write completion status on error */
-       chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
-               IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
-               IOAT_CHANCTRL_ERR_COMPLETION_EN;
-       writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+       writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
 
-       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
-               dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr);
-               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+               dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+               writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
        }
 
        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
-               desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
+               desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
                if (!desc) {
-                       dev_err(to_dev(ioat_chan),
-                               "Only %d initial descriptors\n", i);
+                       dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
                        break;
                }
+               set_desc_id(desc, i);
                list_add_tail(&desc->node, &tmp_list);
        }
-       spin_lock_bh(&ioat_chan->desc_lock);
-       ioat_chan->desccount = i;
-       list_splice(&tmp_list, &ioat_chan->free_desc);
-       if (ioat_chan->device->version != IOAT_VER_1_2)
-               ioat2_dma_massage_chan_desc(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_lock_bh(&ioat->desc_lock);
+       ioat->desccount = i;
+       list_splice(&tmp_list, &ioat->free_desc);
+       spin_unlock_bh(&ioat->desc_lock);
 
        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-       ioat_chan->completion_virt =
-               pci_pool_alloc(ioat_chan->device->completion_pool,
-                              GFP_KERNEL,
-                              &ioat_chan->completion_addr);
-       memset(ioat_chan->completion_virt, 0,
-              sizeof(*ioat_chan->completion_virt));
-       writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
-              ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-       writel(((u64) ioat_chan->completion_addr) >> 32,
-              ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-       tasklet_enable(&ioat_chan->cleanup_task);
-       ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
-       return ioat_chan->desccount;
+       chan->completion = pci_pool_alloc(chan->device->completion_pool,
+                                         GFP_KERNEL, &chan->completion_dma);
+       memset(chan->completion, 0, sizeof(*chan->completion));
+       writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
+              chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+       writel(((u64) chan->completion_dma) >> 32,
+              chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+       tasklet_enable(&chan->cleanup_task);
+       ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
+       dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+               __func__, ioat->desccount);
+       return ioat->desccount;
 }
 
 /**
- * ioat_dma_free_chan_resources - release all the descriptors
+ * ioat1_dma_free_chan_resources - release all the descriptors
  * @chan: the channel to be cleaned
  */
-static void ioat_dma_free_chan_resources(struct dma_chan *chan)
+static void ioat1_dma_free_chan_resources(struct dma_chan *c)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
+       struct ioat_chan_common *chan = &ioat->base;
+       struct ioatdma_device *ioatdma_device = chan->device;
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;
 
        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */
-       if (ioat_chan->desccount == 0)
+       if (ioat->desccount == 0)
                return;
 
-       tasklet_disable(&ioat_chan->cleanup_task);
-       ioat_dma_memcpy_cleanup(ioat_chan);
+       tasklet_disable(&chan->cleanup_task);
+       del_timer_sync(&chan->timer);
+       ioat1_cleanup(ioat);
 
        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
-              ioat_chan->reg_base
-                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+              chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
        mdelay(100);
 
-       spin_lock_bh(&ioat_chan->desc_lock);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               list_for_each_entry_safe(desc, _desc,
-                                        &ioat_chan->used_desc, node) {
-                       in_use_descs++;
-                       list_del(&desc->node);
-                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                                     desc->txd.phys);
-                       kfree(desc);
-               }
-               list_for_each_entry_safe(desc, _desc,
-                                        &ioat_chan->free_desc, node) {
-                       list_del(&desc->node);
-                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                                     desc->txd.phys);
-                       kfree(desc);
-               }
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               list_for_each_entry_safe(desc, _desc,
-                                        ioat_chan->free_desc.next, node) {
-                       list_del(&desc->node);
-                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                                     desc->txd.phys);
-                       kfree(desc);
-               }
-               desc = to_ioat_desc(ioat_chan->free_desc.next);
+       spin_lock_bh(&ioat->desc_lock);
+       list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+               dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
+                       __func__, desc_id(desc));
+               dump_desc_dbg(ioat, desc);
+               in_use_descs++;
+               list_del(&desc->node);
+               pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                             desc->txd.phys);
+               kfree(desc);
+       }
+       list_for_each_entry_safe(desc, _desc,
+                                &ioat->free_desc, node) {
+               list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->txd.phys);
                kfree(desc);
-               INIT_LIST_HEAD(&ioat_chan->free_desc);
-               INIT_LIST_HEAD(&ioat_chan->used_desc);
-               break;
        }
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_unlock_bh(&ioat->desc_lock);
 
        pci_pool_free(ioatdma_device->completion_pool,
-                     ioat_chan->completion_virt,
-                     ioat_chan->completion_addr);
+                     chan->completion,
+                     chan->completion_dma);
 
        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
-               dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+               dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);
 
-       ioat_chan->last_completion = ioat_chan->completion_addr = 0;
-       ioat_chan->pending = 0;
-       ioat_chan->dmacount = 0;
-       ioat_chan->desccount = 0;
-       ioat_chan->watchdog_completion = 0;
-       ioat_chan->last_compl_desc_addr_hw = 0;
-       ioat_chan->watchdog_tcp_cookie =
-               ioat_chan->watchdog_last_tcp_cookie = 0;
+       chan->last_completion = 0;
+       chan->completion_dma = 0;
+       ioat->pending = 0;
+       ioat->desccount = 0;
 }
 
 /**
- * ioat_dma_get_next_descriptor - return the next available descriptor
- * @ioat_chan: IOAT DMA channel handle
+ * ioat1_dma_get_next_descriptor - return the next available descriptor
+ * @ioat: IOAT DMA channel handle
  *
  * Gets the next descriptor from the chain, and must be called with the
  * channel's desc_lock held.  Allocates more descriptors if the channel
  * has run out.
  */
 static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
 {
        struct ioat_desc_sw *new;
 
-       if (!list_empty(&ioat_chan->free_desc)) {
-               new = to_ioat_desc(ioat_chan->free_desc.next);
+       if (!list_empty(&ioat->free_desc)) {
+               new = to_ioat_desc(ioat->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
-               new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+               new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
                if (!new) {
-                       dev_err(to_dev(ioat_chan), "alloc failed\n");
+                       dev_err(to_dev(&ioat->base), "alloc failed\n");
                        return NULL;
                }
        }
-
-       prefetch(new->hw);
-       return new;
-}
-
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
-       struct ioat_desc_sw *new;
-
-       /*
-        * used.prev points to where to start processing
-        * used.next points to next free descriptor
-        * if used.prev == NULL, there are none waiting to be processed
-        * if used.next == used.prev.prev, there is only one free descriptor,
-        *      and we need to use it to as a noop descriptor before
-        *      linking in a new set of descriptors, since the device
-        *      has probably already read the pointer to it
-        */
-       if (ioat_chan->used_desc.prev &&
-           ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
-
-               struct ioat_desc_sw *desc;
-               struct ioat_desc_sw *noop_desc;
-               int i;
-
-               /* set up the noop descriptor */
-               noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
-               /* set size to non-zero value (channel returns error when size is 0) */
-               noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
-               noop_desc->hw->ctl = 0;
-               noop_desc->hw->ctl_f.null = 1;
-               noop_desc->hw->src_addr = 0;
-               noop_desc->hw->dst_addr = 0;
-
-               ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
-               ioat_chan->pending++;
-               ioat_chan->dmacount++;
-
-               /* try to get a few more descriptors */
-               for (i = 16; i; i--) {
-                       desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-                       if (!desc) {
-                               dev_err(to_dev(ioat_chan), "alloc failed\n");
-                               break;
-                       }
-                       list_add_tail(&desc->node, ioat_chan->used_desc.next);
-
-                       desc->hw->next
-                               = to_ioat_desc(desc->node.next)->txd.phys;
-                       to_ioat_desc(desc->node.prev)->hw->next
-                               = desc->txd.phys;
-                       ioat_chan->desccount++;
-               }
-
-               ioat_chan->used_desc.next = noop_desc->node.next;
-       }
-       new = to_ioat_desc(ioat_chan->used_desc.next);
-       prefetch(new);
-       ioat_chan->used_desc.next = new->node.next;
-
-       if (ioat_chan->used_desc.prev == NULL)
-               ioat_chan->used_desc.prev = &new->node;
-
+       dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
+               __func__, desc_id(new));
        prefetch(new->hw);
        return new;
 }
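
ioat1_dma_get_next_descriptor() prefers recycled descriptors and falls back to a GFP_ATOMIC allocation under desc_lock, returning NULL on failure so the prep path can bail out. A user-space model of that policy (struct desc, alloc_atomic() and malloc() stand in for the real descriptor pool):

    #include <stdio.h>
    #include <stdlib.h>

    struct desc { struct desc *next; };

    static struct desc *free_list;

    /* GFP_ATOMIC stand-in: a plain allocation that is allowed to fail */
    static struct desc *alloc_atomic(void)
    {
        return malloc(sizeof(struct desc));
    }

    /* free list first, atomic allocation as fallback, NULL on failure */
    static struct desc *get_next_desc(void)
    {
        struct desc *d = free_list;

        if (d)
            free_list = d->next;   /* pop a recycled descriptor */
        else
            d = alloc_atomic();
        return d;
    }

    int main(void)
    {
        struct desc *d = get_next_desc();
        printf("got %s descriptor\n", d ? "a fresh" : "no");
        free(d);
        return 0;
    }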
 
-static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
-       if (!ioat_chan)
-               return NULL;
-
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               return ioat1_dma_get_next_descriptor(ioat_chan);
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               return ioat2_dma_get_next_descriptor(ioat_chan);
-       }
-       return NULL;
-}
-
 static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                      dma_addr_t dma_src, size_t len, unsigned long flags)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_desc_sw *desc;
        size_t copy;
        LIST_HEAD(chain);
@@ -880,14 +478,14 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
        struct ioat_dma_descriptor *hw = NULL;
        int tx_cnt = 0;
 
-       spin_lock_bh(&ioat_chan->desc_lock);
-       desc = ioat_dma_get_next_descriptor(ioat_chan);
+       spin_lock_bh(&ioat->desc_lock);
+       desc = ioat1_dma_get_next_descriptor(ioat);
        do {
                if (!desc)
                        break;
 
                tx_cnt++;
-               copy = min_t(size_t, len, ioat_chan->xfercap);
+               copy = min_t(size_t, len, ioat->xfercap);
 
                hw = desc->hw;
                hw->size = copy;
@@ -904,319 +502,266 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                        struct ioat_desc_sw *next;
 
                        async_tx_ack(&desc->txd);
-                       next = ioat_dma_get_next_descriptor(ioat_chan);
+                       next = ioat1_dma_get_next_descriptor(ioat);
                        hw->next = next ? next->txd.phys : 0;
+                       dump_desc_dbg(ioat, desc);
                        desc = next;
                } else
                        hw->next = 0;
        } while (len);
 
        if (!desc) {
-               dev_err(to_dev(ioat_chan),
-                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-               list_splice(&chain, &ioat_chan->free_desc);
-               spin_unlock_bh(&ioat_chan->desc_lock);
+               struct ioat_chan_common *chan = &ioat->base;
+
+               dev_err(to_dev(chan),
+                       "chan%d - get_next_desc failed\n", chan_num(chan));
+               list_splice(&chain, &ioat->free_desc);
+               spin_unlock_bh(&ioat->desc_lock);
                return NULL;
        }
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_unlock_bh(&ioat->desc_lock);
 
        desc->txd.flags = flags;
-       desc->tx_cnt = tx_cnt;
-       desc->src = dma_src;
-       desc->dst = dma_dest;
        desc->len = total_len;
-       list_splice(&chain, &desc->txd.tx_list);
+       list_splice(&chain, &desc->tx_list);
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.compl_write = 1;
+       hw->tx_cnt = tx_cnt;
+       dump_desc_dbg(ioat, desc);
 
        return &desc->txd;
 }
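
From a dmaengine client's point of view, the prep/submit/issue sequence looks like the sketch below. It is a kernel-context fragment, not a standalone program; it assumes a channel with DMA_MEMCPY capability and src/dst already DMA-mapped, and sketch_dma_memcpy is a hypothetical helper, not a driver or core API:

    #include <linux/dmaengine.h>

    /* sketch: push one memcpy through a dmaengine channel */
    static dma_cookie_t sketch_dma_memcpy(struct dma_chan *chan,
                                          dma_addr_t dst, dma_addr_t src,
                                          size_t len)
    {
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        /* lands in ioat1_dma_prep_memcpy(), which may chain several
         * hardware descriptors when len exceeds xfercap */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_PREP_INTERRUPT);
        if (!tx)
            return -ENOMEM;

        cookie = tx->tx_submit(tx);     /* ioat1_tx_submit() */
        dma_async_issue_pending(chan);  /* flush below the high-water mark */
        return cookie;
    }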
 
-static struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
-                     dma_addr_t dma_src, size_t len, unsigned long flags)
+static void ioat1_cleanup_tasklet(unsigned long data)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioat_desc_sw *new;
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-       new = ioat2_dma_get_next_descriptor(ioat_chan);
-
-       /*
-        * leave ioat_chan->desc_lock set in ioat 2 path
-        * it will get unlocked at end of tx_submit
-        */
+       struct ioat_dma_chan *chan = (void *)data;
 
-       if (new) {
-               new->len = len;
-               new->dst = dma_dest;
-               new->src = dma_src;
-               new->txd.flags = flags;
-               return &new->txd;
-       } else {
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               dev_err(to_dev(ioat_chan),
-                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-               return NULL;
-       }
+       ioat1_cleanup(chan);
+       writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
-static void ioat_dma_cleanup_tasklet(unsigned long data)
+void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+                   size_t len, struct ioat_dma_descriptor *hw)
 {
-       struct ioat_dma_chan *chan = (void *)data;
-       ioat_dma_memcpy_cleanup(chan);
-       writew(IOAT_CHANCTRL_INT_DISABLE,
-              chan->reg_base + IOAT_CHANCTRL_OFFSET);
-}
+       struct pci_dev *pdev = chan->device->pdev;
+       size_t offset = len - hw->size;
 
-static void
-ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
-{
-       if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-               if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-                       pci_unmap_single(ioat_chan->device->pdev,
-                                        pci_unmap_addr(desc, dst),
-                                        pci_unmap_len(desc, len),
-                                        PCI_DMA_FROMDEVICE);
-               else
-                       pci_unmap_page(ioat_chan->device->pdev,
-                                      pci_unmap_addr(desc, dst),
-                                      pci_unmap_len(desc, len),
-                                      PCI_DMA_FROMDEVICE);
-       }
+       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+               ioat_unmap(pdev, hw->dst_addr - offset, len,
+                          PCI_DMA_FROMDEVICE, flags, 1);
 
-       if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-               if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-                       pci_unmap_single(ioat_chan->device->pdev,
-                                        pci_unmap_addr(desc, src),
-                                        pci_unmap_len(desc, len),
-                                        PCI_DMA_TODEVICE);
-               else
-                       pci_unmap_page(ioat_chan->device->pdev,
-                                      pci_unmap_addr(desc, src),
-                                      pci_unmap_len(desc, len),
-                                      PCI_DMA_TODEVICE);
-       }
+       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
+               ioat_unmap(pdev, hw->src_addr - offset, len,
+                          PCI_DMA_TODEVICE, flags, 0);
 }
 
-/**
- * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- */
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
+unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 {
        unsigned long phys_complete;
-       struct ioat_desc_sw *desc, *_desc;
-       dma_cookie_t cookie = 0;
-       unsigned long desc_phys;
-       struct ioat_desc_sw *latest_desc;
-       struct dma_async_tx_descriptor *tx;
+       u64 completion;
 
-       prefetch(ioat_chan->completion_virt);
+       completion = *chan->completion;
+       phys_complete = ioat_chansts_to_addr(completion);
 
-       if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
-               return;
+       dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+               (unsigned long long) phys_complete);
 
-       /* The completion writeback can happen at any time,
-          so reads by the driver need to be atomic operations
-          The descriptor physical addresses are limited to 32-bits
-          when the CPU can only do a 32-bit mov */
-
-#if (BITS_PER_LONG == 64)
-       phys_complete =
-               ioat_chan->completion_virt->full
-               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
-       phys_complete =
-               ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
-       if ((ioat_chan->completion_virt->full
-               & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
-                               IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-               dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
-                       readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
+       if (is_ioat_halted(completion)) {
+               u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+               dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
+                       chanerr);
 
                /* TODO do something to salvage the situation */
        }
 
-       if (phys_complete == ioat_chan->last_completion) {
-               spin_unlock_bh(&ioat_chan->cleanup_lock);
+       return phys_complete;
+}
+
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+                          unsigned long *phys_complete)
+{
+       *phys_complete = ioat_get_current_completion(chan);
+       if (*phys_complete == chan->last_completion)
+               return false;
+       clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+       mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+       return true;
+}
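
The completion writeback packs the address of the last completed descriptor and the transfer status into one 64-bit word. A standalone model of the decode; the mask values here are illustrative (low bits as status, a 64-byte-aligned address above them) and are not copied from registers.h:

    #include <stdint.h>
    #include <stdio.h>

    #define STATUS_MASK    0x7ULL       /* illustrative */
    #define ADDR_MASK      (~0x3FULL)   /* illustrative */
    #define STATUS_HALTED  0x3ULL       /* illustrative */

    int main(void)
    {
        uint64_t completion = 0x12340ULL | STATUS_HALTED;  /* fake writeback */

        uint64_t phys_complete = completion & ADDR_MASK;
        uint64_t status = completion & STATUS_MASK;

        printf("last completed desc @ %#llx\n",
               (unsigned long long)phys_complete);
        if (status == STATUS_HALTED)
            printf("channel halted, read CHANERR and recover\n");
        return 0;
    }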
+
+static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+{
+       struct ioat_chan_common *chan = &ioat->base;
+       struct list_head *_desc, *n;
+       struct dma_async_tx_descriptor *tx;
+
+       dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+                __func__, phys_complete);
+       list_for_each_safe(_desc, n, &ioat->used_desc) {
+               struct ioat_desc_sw *desc;
+
+               prefetch(n);
+               desc = list_entry(_desc, typeof(*desc), node);
+               tx = &desc->txd;
                /*
-                * perhaps we're stuck so hard that the watchdog can't go off?
-                * try to catch it after 2 seconds
+                * Incoming DMA requests may use multiple descriptors,
+                * due to exceeding xfercap, perhaps. If so, only the
+                * last one will have a cookie, and require unmapping.
                 */
-               if (ioat_chan->device->version != IOAT_VER_3_0) {
-                       if (time_after(jiffies,
-                                      ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
-                               ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
-                               ioat_chan->last_completion_time = jiffies;
+               dump_desc_dbg(ioat, desc);
+               if (tx->cookie) {
+                       chan->completed_cookie = tx->cookie;
+                       tx->cookie = 0;
+                       ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+                       ioat->active -= desc->hw->tx_cnt;
+                       if (tx->callback) {
+                               tx->callback(tx->callback_param);
+                               tx->callback = NULL;
                        }
                }
-               return;
-       }
-       ioat_chan->last_completion_time = jiffies;
-
-       cookie = 0;
-       if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
-               spin_unlock_bh(&ioat_chan->cleanup_lock);
-               return;
-       }
 
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               list_for_each_entry_safe(desc, _desc,
-                                        &ioat_chan->used_desc, node) {
-                       tx = &desc->txd;
+               if (tx->phys != phys_complete) {
                        /*
-                        * Incoming DMA requests may use multiple descriptors,
-                        * due to exceeding xfercap, perhaps. If so, only the
-                        * last one will have a cookie, and require unmapping.
+                        * a completed entry, but not the last, so clean
+                        * up if the client is done with the descriptor
+                        */
+                       if (async_tx_test_ack(tx))
+                               list_move_tail(&desc->node, &ioat->free_desc);
+               } else {
+                       /*
+                        * last used desc. Do not remove, so we can
+                        * append from it.
                         */
-                       if (tx->cookie) {
-                               cookie = tx->cookie;
-                               ioat_dma_unmap(ioat_chan, desc);
-                               if (tx->callback) {
-                                       tx->callback(tx->callback_param);
-                                       tx->callback = NULL;
-                               }
-                       }
 
-                       if (tx->phys != phys_complete) {
-                               /*
-                                * a completed entry, but not the last, so clean
-                                * up if the client is done with the descriptor
-                                */
-                               if (async_tx_test_ack(tx)) {
-                                       list_move_tail(&desc->node,
-                                                      &ioat_chan->free_desc);
-                               } else
-                                       tx->cookie = 0;
-                       } else {
-                               /*
-                                * last used desc. Do not remove, so we can
-                                * append from it, but don't look at it next
-                                * time, either
-                                */
-                               tx->cookie = 0;
-
-                               /* TODO check status bits? */
-                               break;
+                       /* if nothing else is pending, cancel the
+                        * completion timeout
+                        */
+                       if (n == &ioat->used_desc) {
+                               dev_dbg(to_dev(chan),
+                                       "%s cancel completion timeout\n",
+                                       __func__);
+                               clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                        }
-               }
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               /* has some other thread has already cleaned up? */
-               if (ioat_chan->used_desc.prev == NULL)
+
+                       /* TODO check status bits? */
                        break;
+               }
+       }
 
-               /* work backwards to find latest finished desc */
-               desc = to_ioat_desc(ioat_chan->used_desc.next);
-               tx = &desc->txd;
-               latest_desc = NULL;
-               do {
-                       desc = to_ioat_desc(desc->node.prev);
-                       desc_phys = (unsigned long)tx->phys
-                                      & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-                       if (desc_phys == phys_complete) {
-                               latest_desc = desc;
-                               break;
-                       }
-               } while (&desc->node != ioat_chan->used_desc.prev);
-
-               if (latest_desc != NULL) {
-                       /* work forwards to clear finished descriptors */
-                       for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
-                            &desc->node != latest_desc->node.next &&
-                            &desc->node != ioat_chan->used_desc.next;
-                            desc = to_ioat_desc(desc->node.next)) {
-                               if (tx->cookie) {
-                                       cookie = tx->cookie;
-                                       tx->cookie = 0;
-                                       ioat_dma_unmap(ioat_chan, desc);
-                                       if (tx->callback) {
-                                               tx->callback(tx->callback_param);
-                                               tx->callback = NULL;
-                                       }
-                               }
-                       }
+       chan->last_completion = phys_complete;
+}
 
-                       /* move used.prev up beyond those that are finished */
-                       if (&desc->node == ioat_chan->used_desc.next)
-                               ioat_chan->used_desc.prev = NULL;
-                       else
-                               ioat_chan->used_desc.prev = &desc->node;
-               }
-               break;
+/**
+ * ioat1_cleanup - clean up finished descriptors
+ * @ioat: ioat channel to be cleaned up
+ *
+ * To prevent lock contention we defer cleanup when the locks are
+ * contended with a terminal timeout that forces cleanup and catches
+ * completion notification errors.
+ */
+static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+{
+       struct ioat_chan_common *chan = &ioat->base;
+       unsigned long phys_complete;
+
+       prefetch(chan->completion);
+
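+       /* locks contended: bail out and let the timer-driven cleanup catch up */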
+       if (!spin_trylock_bh(&chan->cleanup_lock))
+               return;
+
+       if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+               spin_unlock_bh(&chan->cleanup_lock);
+               return;
        }
 
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       if (!spin_trylock_bh(&ioat->desc_lock)) {
+               spin_unlock_bh(&chan->cleanup_lock);
+               return;
+       }
 
-       ioat_chan->last_completion = phys_complete;
-       if (cookie != 0)
-               ioat_chan->completed_cookie = cookie;
+       __cleanup(ioat, phys_complete);
 
-       spin_unlock_bh(&ioat_chan->cleanup_lock);
+       spin_unlock_bh(&ioat->desc_lock);
+       spin_unlock_bh(&chan->cleanup_lock);
 }
 
-/**
- * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
- * @chan: IOAT DMA channel handle
- * @cookie: DMA transaction identifier
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
- */
-static enum dma_status
-ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
-                    dma_cookie_t *done, dma_cookie_t *used)
+static void ioat1_timer_event(unsigned long data)
 {
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       dma_cookie_t last_used;
-       dma_cookie_t last_complete;
-       enum dma_status ret;
+       struct ioat_dma_chan *ioat = (void *) data;
+       struct ioat_chan_common *chan = &ioat->base;
 
-       last_used = chan->cookie;
-       last_complete = ioat_chan->completed_cookie;
-       ioat_chan->watchdog_tcp_cookie = cookie;
+       dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
 
-       if (done)
-               *done = last_complete;
-       if (used)
-               *used = last_used;
+       spin_lock_bh(&chan->cleanup_lock);
+       if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
+               struct ioat_desc_sw *desc;
 
-       ret = dma_async_is_complete(cookie, last_complete, last_used);
-       if (ret == DMA_SUCCESS)
-               return ret;
+               spin_lock_bh(&ioat->desc_lock);
 
-       ioat_dma_memcpy_cleanup(ioat_chan);
+               /* restart active descriptors */
+               desc = to_ioat_desc(ioat->used_desc.prev);
+               ioat_set_chainaddr(ioat, desc->txd.phys);
+               ioat_start(chan);
 
-       last_used = chan->cookie;
-       last_complete = ioat_chan->completed_cookie;
+               ioat->pending = 0;
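+               /* descriptors were restarted, so expect a completion and re-arm */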
+               set_bit(IOAT_COMPLETION_PENDING, &chan->state);
+               mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+               spin_unlock_bh(&ioat->desc_lock);
+       } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+               unsigned long phys_complete;
 
-       if (done)
-               *done = last_complete;
-       if (used)
-               *used = last_used;
+               spin_lock_bh(&ioat->desc_lock);
+               /* if we haven't made progress and we have already
+                * acknowledged a pending completion once, then be more
+                * forceful with a restart
+                */
+               if (ioat_cleanup_preamble(chan, &phys_complete))
+                       __cleanup(ioat, phys_complete);
+               else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+                       ioat1_reset_channel(ioat);
+               else {
+                       u64 status = ioat_chansts(chan);
+
+                       /* manually update the last completion address */
+                       if (ioat_chansts_to_addr(status) != 0)
+                               *chan->completion = status;
+
+                       set_bit(IOAT_COMPLETION_ACK, &chan->state);
+                       mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+               }
+               spin_unlock_bh(&ioat->desc_lock);
+       }
+       spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static enum dma_status
+ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+                     dma_cookie_t *done, dma_cookie_t *used)
+{
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
 
-       return dma_async_is_complete(cookie, last_complete, last_used);
+       if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+               return DMA_SUCCESS;
+
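+       /* not complete yet: reap finished descriptors and re-check */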
+       ioat1_cleanup(ioat);
+
+       return ioat_is_complete(c, cookie, done, used);
 }
 
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
+static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
 {
+       struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *desc;
        struct ioat_dma_descriptor *hw;
 
-       spin_lock_bh(&ioat_chan->desc_lock);
+       spin_lock_bh(&ioat->desc_lock);
 
-       desc = ioat_dma_get_next_descriptor(ioat_chan);
+       desc = ioat1_dma_get_next_descriptor(ioat);
 
        if (!desc) {
-               dev_err(to_dev(ioat_chan),
+               dev_err(to_dev(chan),
                        "Unable to start null desc - get next desc failed\n");
-               spin_unlock_bh(&ioat_chan->desc_lock);
+               spin_unlock_bh(&ioat->desc_lock);
                return;
        }
 
@@ -1230,31 +775,13 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
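+       /* the null descriptor moves no data; it just gives the engine a chain to start from */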
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               hw->next = 0;
-               list_add_tail(&desc->node, &ioat_chan->used_desc);
-
-               writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->txd.phys) >> 32,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->txd.phys) >> 32,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
-               ioat_chan->dmacount++;
-               __ioat2_dma_memcpy_issue_pending(ioat_chan);
-               break;
-       }
-       spin_unlock_bh(&ioat_chan->desc_lock);
+       hw->next = 0;
+       list_add_tail(&desc->node, &ioat->used_desc);
+       dump_desc_dbg(ioat, desc);
+
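+       /* point the channel at the null descriptor and start it */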
+       ioat_set_chainaddr(ioat, desc->txd.phys);
+       ioat_start(chan);
+       spin_unlock_bh(&ioat->desc_lock);
 }
 
 /*
@@ -1262,7 +789,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
-static void ioat_dma_test_callback(void *dma_async_param)
+static void __devinit ioat_dma_test_callback(void *dma_async_param)
 {
        struct completion *cmp = dma_async_param;
 
@@ -1273,7 +800,7 @@ static void ioat_dma_test_callback(void *dma_async_param)
  * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
  * @device: device to be tested
  */
-static int ioat_dma_self_test(struct ioatdma_device *device)
+int __devinit ioat_dma_self_test(struct ioatdma_device *device)
 {
        int i;
        u8 *src;
@@ -1371,7 +898,7 @@ MODULE_PARM_DESC(ioat_interrupt_style,
  */
 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 {
-       struct ioat_dma_chan *ioat_chan;
+       struct ioat_chan_common *chan;
        struct pci_dev *pdev = device->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
@@ -1404,15 +931,15 @@ msix:
 
        for (i = 0; i < msixcnt; i++) {
                msix = &device->msix_entries[i];
-               ioat_chan = ioat_chan_by_index(device, i);
+               chan = ioat_chan_by_index(device, i);
                err = devm_request_irq(dev, msix->vector,
                                       ioat_dma_do_interrupt_msix, 0,
-                                      "ioat-msix", ioat_chan);
+                                      "ioat-msix", chan);
                if (err) {
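+                       /* unwind the vectors that were already requested */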
                        for (j = 0; j < i; j++) {
                                msix = &device->msix_entries[j];
-                               ioat_chan = ioat_chan_by_index(device, j);
-                               devm_free_irq(dev, msix->vector, ioat_chan);
+                               chan = ioat_chan_by_index(device, j);
+                               devm_free_irq(dev, msix->vector, chan);
                        }
                        goto msix_single_vector;
                }
@@ -1474,7 +1001,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device)
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
-static int ioat_probe(struct ioatdma_device *device)
+int __devinit ioat_probe(struct ioatdma_device *device)
 {
        int err = -ENODEV;
        struct dma_device *dma = &device->common;
@@ -1493,26 +1020,19 @@ static int ioat_probe(struct ioatdma_device *device)
        device->completion_pool = pci_pool_create("completion_pool", pdev,
                                                  sizeof(u64), SMP_CACHE_BYTES,
                                                  SMP_CACHE_BYTES);
+
        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }
 
-       ioat_dma_enumerate_channels(device);
+       device->enumerate_channels(device);
 
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
-       dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
-       dma->device_free_chan_resources = ioat_dma_free_chan_resources;
-       dma->device_is_tx_complete = ioat_dma_is_complete;
        dma->dev = &pdev->dev;
 
-       dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
-               " %d channels, device version 0x%02x, driver version %s\n",
-               dma->chancnt, device->version, IOAT_DMA_VERSION);
-
        if (!dma->chancnt) {
-               dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
-                       "zero channels detected\n");
+               dev_err(dev, "zero channels detected\n");
                goto err_setup_interrupts;
        }
 
@@ -1520,7 +1040,7 @@ static int ioat_probe(struct ioatdma_device *device)
        if (err)
                goto err_setup_interrupts;
 
-       err = ioat_dma_self_test(device);
+       err = device->self_test(device);
        if (err)
                goto err_self_test;
 
@@ -1536,7 +1056,7 @@ err_dma_pool:
        return err;
 }
 
-static int ioat_register(struct ioatdma_device *device)
+int __devinit ioat_register(struct ioatdma_device *device)
 {
        int err = dma_async_device_register(&device->common);
 
@@ -1563,133 +1083,156 @@ static void ioat1_intr_quirk(struct ioatdma_device *device)
        pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
 }
 
-int ioat1_dma_probe(struct ioatdma_device *device, int dca)
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
 {
-       struct pci_dev *pdev = device->pdev;
-       struct dma_device *dma;
-       int err;
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
 
-       device->intr_quirk = ioat1_intr_quirk;
-       dma = &device->common;
-       dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-       dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
+       return sprintf(page, "%d\n", ioat->desccount);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
 
-       err = ioat_probe(device);
-       if (err)
-               return err;
-       ioat_set_tcp_copy_break(4096);
-       err = ioat_register(device);
-       if (err)
-               return err;
-       if (dca)
-               device->dca = ioat_dca_init(pdev, device->reg_base);
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+       struct ioat_dma_chan *ioat = to_ioat_chan(c);
 
-       INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
-       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+       return sprintf(page, "%d\n", ioat->active);
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+       struct dma_device *dma = c->device;
+
+       return sprintf(page, "copy%s%s%s%s%s%s\n",
+                      dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+                      dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+                      dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+                      dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+                      dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
+                      dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
 
-       return err;
 }
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
 
-int ioat2_dma_probe(struct ioatdma_device *device, int dca)
+static ssize_t version_show(struct dma_chan *c, char *page)
 {
-       struct pci_dev *pdev = device->pdev;
-       struct dma_device *dma;
-       struct dma_chan *chan;
-       struct ioat_dma_chan *ioat_chan;
-       int err;
+       struct dma_device *dma = c->device;
+       struct ioatdma_device *device = to_ioatdma_device(dma);
 
-       dma = &device->common;
-       dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-       dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
+       return sprintf(page, "%d.%d\n",
+                      device->version >> 4, device->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static struct attribute *ioat1_attrs[] = {
+       &ring_size_attr.attr,
+       &ring_active_attr.attr,
+       &ioat_cap_attr.attr,
+       &ioat_version_attr.attr,
+       NULL,
+};
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       struct ioat_sysfs_entry *entry;
+       struct ioat_chan_common *chan;
 
-       err = ioat_probe(device);
-       if (err)
-               return err;
-       ioat_set_tcp_copy_break(2048);
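+       /* recover our entry and channel from the raw kobject/attribute pair */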
+       entry = container_of(attr, struct ioat_sysfs_entry, attr);
+       chan = container_of(kobj, struct ioat_chan_common, kobj);
 
-       list_for_each_entry(chan, &dma->channels, device_node) {
-               ioat_chan = to_ioat_chan(chan);
-               writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
-                      ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+       if (!entry->show)
+               return -EIO;
+       return entry->show(&chan->common, page);
+}
+
+struct sysfs_ops ioat_sysfs_ops = {
+       .show   = ioat_attr_show,
+};
+
+static struct kobj_type ioat1_ktype = {
+       .sysfs_ops = &ioat_sysfs_ops,
+       .default_attrs = ioat1_attrs,
+};
+
+void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
+{
+       struct dma_device *dma = &device->common;
+       struct dma_chan *c;
+
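+       /* register a "quickdata" directory under each channel's sysfs node */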
+       list_for_each_entry(c, &dma->channels, device_node) {
+               struct ioat_chan_common *chan = to_chan_common(c);
+               struct kobject *parent = &c->dev->device.kobj;
+               int err;
+
+               err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
+               if (err) {
+                       dev_warn(to_dev(chan),
+                                "sysfs init error (%d), continuing...\n", err);
+                       kobject_put(&chan->kobj);
+                       set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
+               }
        }
+}
 
-       err = ioat_register(device);
-       if (err)
-               return err;
-       if (dca)
-               device->dca = ioat2_dca_init(pdev, device->reg_base);
+void ioat_kobject_del(struct ioatdma_device *device)
+{
+       struct dma_device *dma = &device->common;
+       struct dma_chan *c;
 
-       INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
-       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+       list_for_each_entry(c, &dma->channels, device_node) {
+               struct ioat_chan_common *chan = to_chan_common(c);
 
-       return err;
+               if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
+                       kobject_del(&chan->kobj);
+                       kobject_put(&chan->kobj);
+               }
+       }
 }
 
-int ioat3_dma_probe(struct ioatdma_device *device, int dca)
+int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
 {
        struct pci_dev *pdev = device->pdev;
        struct dma_device *dma;
-       struct dma_chan *chan;
-       struct ioat_dma_chan *ioat_chan;
        int err;
-       u16 dev_id;
 
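+       /* install the ioat1-specific hooks used by the common core */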
+       device->intr_quirk = ioat1_intr_quirk;
+       device->enumerate_channels = ioat1_enumerate_channels;
+       device->self_test = ioat_dma_self_test;
        dma = &device->common;
-       dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-       dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
-
-       /* -= IOAT ver.3 workarounds =- */
-       /* Write CHANERRMSK_INT with 3E07h to mask out the errors
-        * that can cause stability issues for IOAT ver.3
-        */
-       pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
-       /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-        * (workaround for spurious config parity error after restart)
-        */
-       pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-       if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-               pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+       dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+       dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
+       dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
+       dma->device_is_tx_complete = ioat1_dma_is_complete;
 
        err = ioat_probe(device);
        if (err)
                return err;
-       ioat_set_tcp_copy_break(262144);
-
-       list_for_each_entry(chan, &dma->channels, device_node) {
-               ioat_chan = to_ioat_chan(chan);
-               writel(IOAT_DMA_DCA_ANY_CPU,
-                      ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-       }
-
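+       /* net reads below this size stay on the cpu instead of being offloaded */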
+       ioat_set_tcp_copy_break(4096);
        err = ioat_register(device);
        if (err)
                return err;
+       ioat_kobject_add(device, &ioat1_ktype);
+
        if (dca)
-               device->dca = ioat3_dca_init(pdev, device->reg_base);
+               device->dca = ioat_dca_init(pdev, device->reg_base);
 
        return err;
 }
 
-void ioat_dma_remove(struct ioatdma_device *device)
+void __devexit ioat_dma_remove(struct ioatdma_device *device)
 {
-       struct dma_chan *chan, *_chan;
-       struct ioat_dma_chan *ioat_chan;
        struct dma_device *dma = &device->common;
 
-       if (device->version != IOAT_VER_3_0)
-               cancel_delayed_work(&device->work);
-
        ioat_disable_interrupts(device);
 
+       ioat_kobject_del(device);
+
        dma_async_device_unregister(dma);
 
        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);
 
-       list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) {
-               ioat_chan = to_ioat_chan(chan);
-               list_del(&chan->device_node);
-       }
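+       /* reset the channel list in one operation instead of unlinking each node */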
+       INIT_LIST_HEAD(&dma->channels);
 }
-