diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index cdc8ecf..b48967b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,49 +45,92 @@
  * See Documentation/dmaengine.txt for more details
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
 #include <linux/rculist.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDR(dma_idr);
 static LIST_HEAD(dma_device_list);
 static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev: device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+       struct dma_chan_dev *chan_dev;
+
+       chan_dev = container_of(dev, typeof(*chan_dev), device);
+       return chan_dev->chan;
+}
+
 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
+       struct dma_chan *chan;
        unsigned long count = 0;
        int i;
+       int err;
 
-       for_each_possible_cpu(i)
-               count += per_cpu_ptr(chan->local, i)->memcpy_count;
+       mutex_lock(&dma_list_mutex);
+       chan = dev_to_dma_chan(dev);
+       if (chan) {
+               for_each_possible_cpu(i)
+                       count += per_cpu_ptr(chan->local, i)->memcpy_count;
+               err = sprintf(buf, "%lu\n", count);
+       } else
+               err = -ENODEV;
+       mutex_unlock(&dma_list_mutex);
 
-       return sprintf(buf, "%lu\n", count);
+       return err;
 }
 
 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
+       struct dma_chan *chan;
        unsigned long count = 0;
        int i;
+       int err;
 
-       for_each_possible_cpu(i)
-               count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+       mutex_lock(&dma_list_mutex);
+       chan = dev_to_dma_chan(dev);
+       if (chan) {
+               for_each_possible_cpu(i)
+                       count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+               err = sprintf(buf, "%lu\n", count);
+       } else
+               err = -ENODEV;
+       mutex_unlock(&dma_list_mutex);
 
-       return sprintf(buf, "%lu\n", count);
+       return err;
 }
 
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
+       struct dma_chan *chan;
+       int err;
+
+       mutex_lock(&dma_list_mutex);
+       chan = dev_to_dma_chan(dev);
+       if (chan)
+               err = sprintf(buf, "%d\n", chan->client_count);
+       else
+               err = -ENODEV;
+       mutex_unlock(&dma_list_mutex);
 
-       return sprintf(buf, "%d\n", chan->client_count);
+       return err;
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -103,9 +146,24 @@ static struct device_attribute dma_attrs[] = {
        __ATTR_NULL
 };
 
+static void chan_dev_release(struct device *dev)
+{
+       struct dma_chan_dev *chan_dev;
+
+       chan_dev = container_of(dev, typeof(*chan_dev), device);
+       if (atomic_dec_and_test(chan_dev->idr_ref)) {
+               mutex_lock(&dma_list_mutex);
+               idr_remove(&dma_idr, chan_dev->dev_id);
+               mutex_unlock(&dma_list_mutex);
+               kfree(chan_dev->idr_ref);
+       }
+       kfree(chan_dev);
+}
+
 static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
+       .dev_release    = chan_dev_release,
 };
 
 /* --- client and device registration --- */
@@ -228,7 +286,7 @@ struct dma_chan_tbl_ent {
 /**
  * channel_table - percpu lookup table for memory-to-memory offload providers
  */
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 
 static int __init dma_channel_table_init(void)
 {
@@ -262,7 +320,7 @@ static int __init dma_channel_table_init(void)
 
        return err;
 }
-subsys_initcall(dma_channel_table_init);
+arch_initcall(dma_channel_table_init);
 
 /**
  * dma_find_channel - find a channel to carry out the operation
@@ -270,17 +328,7 @@ subsys_initcall(dma_channel_table_init);
  */
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
-       struct dma_chan *chan;
-       int cpu;
-
-       WARN_ONCE(dmaengine_ref_count == 0,
-                 "client called %s without a reference", __func__);
-
-       cpu = get_cpu();
-       chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
-       put_cpu();
-
-       return chan;
+       return this_cpu_read(channel_table[tx_type]->chan);
 }
 EXPORT_SYMBOL(dma_find_channel);
 
@@ -292,9 +340,6 @@ void dma_issue_pending_all(void)
        struct dma_device *device;
        struct dma_chan *chan;
 
-       WARN_ONCE(dmaengine_ref_count == 0,
-                 "client called %s without a reference", __func__);
-
        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -398,10 +443,10 @@ static void dma_channel_rebalance(void)
                }
 }
 
-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
+static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+                                         dma_filter_fn fn, void *fn_param)
 {
        struct dma_chan *chan;
-       struct dma_chan *ret = NULL;
 
        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
@@ -420,14 +465,18 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
-                                __func__, dev_name(&chan->dev));
+                                __func__, dma_chan_name(chan));
                        continue;
                }
-               ret = chan;
-               break;
+               if (fn && !fn(chan, fn_param)) {
+                       pr_debug("%s: %s filter said false\n",
+                                __func__, dma_chan_name(chan));
+                       continue;
+               }
+               return chan;
        }
 
-       return ret;
+       return NULL;
 }
 
 /**
@@ -440,48 +489,40 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 {
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
-       bool ack;
        int err;
 
        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
-               chan = private_candidate(mask, device);
-               if (!chan)
-                       continue;
-
-               if (fn)
-                       ack = fn(chan, fn_param);
-               else
-                       ack = true;
-
-               if (ack) {
+               chan = private_candidate(mask, device, fn, fn_param);
+               if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
+                       device->privatecnt++;
                        err = dma_chan_get(chan);
 
                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
-                                        dev_name(&chan->dev));
+                                        dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
-                               pr_err("dmaengine: failed to get %s: (%d)\n",
-                                      dev_name(&chan->dev), err);
+                               pr_debug("dmaengine: failed to get %s: (%d)\n",
+                                        dma_chan_name(chan), err);
                        else
                                break;
-               } else
-                       pr_debug("%s: %s filter said false\n",
-                                __func__, dev_name(&chan->dev));
-               chan = NULL;
+                       if (--device->privatecnt == 0)
+                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+                       chan = NULL;
+               }
        }
        mutex_unlock(&dma_list_mutex);
 
        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
-                chan ? dev_name(&chan->dev) : NULL);
+                chan ? dma_chan_name(chan) : NULL);
 
        return chan;
 }
@@ -493,6 +534,9 @@ void dma_release_channel(struct dma_chan *chan)
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
+       /* drop PRIVATE cap enabled by __dma_request_channel() */
+       if (--chan->device->privatecnt == 0)
+               dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -521,7 +565,7 @@ void dmaengine_get(void)
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
-                                      dev_name(&chan->dev), err);
+                                      dma_chan_name(chan), err);
                }
        }
 
@@ -557,15 +601,77 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static bool device_has_all_tx_types(struct dma_device *device)
+{
+       /* A device that satisfies this test has channels that will never cause
+        * an async_tx channel switch event as all possible operation types can
+        * be handled.
+        */
+       #ifdef CONFIG_ASYNC_TX_DMA
+       if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+               return false;
+       #endif
+
+       #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+       if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
+               return false;
+       #endif
+
+       #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
+       if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
+               return false;
+       #endif
+
+       #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+       if (!dma_has_cap(DMA_XOR, device->cap_mask))
+               return false;
+
+       #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+       if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+               return false;
+       #endif
+       #endif
+
+       #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+       if (!dma_has_cap(DMA_PQ, device->cap_mask))
+               return false;
+
+       #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+       if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+               return false;
+       #endif
+       #endif
+
+       return true;
+}
+
+static int get_dma_id(struct dma_device *device)
+{
+       int rc;
+
+ idr_retry:
+       if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+               return -ENOMEM;
+       mutex_lock(&dma_list_mutex);
+       rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+       mutex_unlock(&dma_list_mutex);
+       if (rc == -EAGAIN)
+               goto idr_retry;
+       else if (rc != 0)
+               return rc;
+
+       return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
  */
 int dma_async_device_register(struct dma_device *device)
 {
-       static int id;
        int chancnt = 0, rc;
        struct dma_chan* chan;
+       atomic_t *idr_ref;
 
        if (!device)
                return -ENODEV;
@@ -575,43 +681,77 @@ int dma_async_device_register(struct dma_device *device)
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
-       BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
-               !device->device_prep_dma_zero_sum);
+       BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
+               !device->device_prep_dma_xor_val);
+       BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
+               !device->device_prep_dma_pq);
+       BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
+               !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
+       BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+               !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
+       BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+               !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-               !device->device_terminate_all);
+               !device->device_control);
 
        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
-       BUG_ON(!device->device_is_tx_complete);
+       BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);
 
-       mutex_lock(&dma_list_mutex);
-       device->dev_id = id++;
-       mutex_unlock(&dma_list_mutex);
+       /* note: this only matters in the
+        * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
+        */
+       if (device_has_all_tx_types(device))
+               dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
+       idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+       if (!idr_ref)
+               return -ENOMEM;
+       rc = get_dma_id(device);
+       if (rc != 0) {
+               kfree(idr_ref);
+               return rc;
+       }
+
+       atomic_set(idr_ref, 0);
 
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
+               rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
-                       continue;
+                       goto err_out;
+               chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+               if (chan->dev == NULL) {
+                       free_percpu(chan->local);
+                       chan->local = NULL;
+                       goto err_out;
+               }
 
                chan->chan_id = chancnt++;
-               chan->dev.class = &dma_devclass;
-               chan->dev.parent = device->dev;
-               dev_set_name(&chan->dev, "dma%dchan%d",
+               chan->dev->device.class = &dma_devclass;
+               chan->dev->device.parent = device->dev;
+               chan->dev->chan = chan;
+               chan->dev->idr_ref = idr_ref;
+               chan->dev->dev_id = device->dev_id;
+               atomic_inc(idr_ref);
+               dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);
 
-               rc = device_register(&chan->dev);
+               rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
+                       kfree(chan->dev);
+                       atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
@@ -636,16 +776,30 @@ int dma_async_device_register(struct dma_device *device)
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
+       if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+               device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
 
        return 0;
 
 err_out:
+       /* if we never registered a channel just release the idr */
+       if (atomic_read(idr_ref) == 0) {
+               mutex_lock(&dma_list_mutex);
+               idr_remove(&dma_idr, device->dev_id);
+               mutex_unlock(&dma_list_mutex);
+               kfree(idr_ref);
+               return rc;
+       }
+
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
-               device_unregister(&chan->dev);
+               mutex_lock(&dma_list_mutex);
+               chan->dev->chan = NULL;
+               mutex_unlock(&dma_list_mutex);
+               device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
@@ -672,7 +826,11 @@ void dma_async_device_unregister(struct dma_device *device)
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
-               device_unregister(&chan->dev);
+               mutex_lock(&dma_list_mutex);
+               chan->dev->chan = NULL;
+               mutex_unlock(&dma_list_mutex);
+               device_unregister(&chan->dev->device);
+               free_percpu(chan->local);
        }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
@@ -697,12 +855,14 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
-       int cpu;
+       unsigned long flags;
 
        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-                                        DMA_CTRL_ACK);
+       flags = DMA_CTRL_ACK |
+               DMA_COMPL_SRC_UNMAP_SINGLE |
+               DMA_COMPL_DEST_UNMAP_SINGLE;
+       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -713,10 +873,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
        tx->callback = NULL;
        cookie = tx->tx_submit(tx);
 
-       cpu = get_cpu();
-       per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-       per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-       put_cpu();
+       preempt_disable();
+       __this_cpu_add(chan->local->bytes_transferred, len);
+       __this_cpu_inc(chan->local->memcpy_count);
+       preempt_enable();
 
        return cookie;
 }
@@ -743,12 +903,12 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
-       int cpu;
+       unsigned long flags;
 
        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-                                        DMA_CTRL_ACK);
+       flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
+       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -759,10 +919,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
        tx->callback = NULL;
        cookie = tx->tx_submit(tx);
 
-       cpu = get_cpu();
-       per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-       per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-       put_cpu();
+       preempt_disable();
+       __this_cpu_add(chan->local->bytes_transferred, len);
+       __this_cpu_inc(chan->local->memcpy_count);
+       preempt_enable();
 
        return cookie;
 }
@@ -791,13 +951,13 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
-       int cpu;
+       unsigned long flags;
 
        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
-       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-                                        DMA_CTRL_ACK);
+       flags = DMA_CTRL_ACK;
+       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -808,10 +968,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        tx->callback = NULL;
        cookie = tx->tx_submit(tx);
 
-       cpu = get_cpu();
-       per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-       per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-       put_cpu();
+       preempt_disable();
+       __this_cpu_add(chan->local->bytes_transferred, len);
+       __this_cpu_inc(chan->local->memcpy_count);
+       preempt_enable();
 
        return cookie;
 }
@@ -821,55 +981,32 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
 {
        tx->chan = chan;
+       #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
+       #endif
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
- *
- * This routine assumes that tx was obtained from a call to async_memcpy,
- * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
- * and submitted).  Walking the parent chain is only meant to cover for DMA
- * drivers that do not implement the DMA_INTERRUPT capability and may race with
- * the driver's descriptor cleanup routine.
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-       enum dma_status status;
-       struct dma_async_tx_descriptor *iter;
-       struct dma_async_tx_descriptor *parent;
+       unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
        if (!tx)
                return DMA_SUCCESS;
 
-       WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-                 " %s\n", __func__, dev_name(&tx->chan->dev));
-
-       /* poll through the dependency chain, return when tx is complete */
-       do {
-               iter = tx;
-
-               /* find the root of the unsubmitted dependency chain */
-               do {
-                       parent = iter->parent;
-                       if (!parent)
-                               break;
-                       else
-                               iter = parent;
-               } while (parent);
-
-               /* there is a small window for ->parent == NULL and
-                * ->cookie == -EBUSY
-                */
-               while (iter->cookie == -EBUSY)
-                       cpu_relax();
-
-               status = dma_sync_wait(iter->chan, iter->cookie);
-       } while (status == DMA_IN_PROGRESS || (iter != tx));
-
-       return status;
+       while (tx->cookie == -EBUSY) {
+               if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+                       pr_err("%s timeout waiting for descriptor submission\n",
+                               __func__);
+                       return DMA_ERROR;
+               }
+               cpu_relax();
+       }
+       return dma_sync_wait(tx->chan, tx->cookie);
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 
@@ -879,13 +1016,15 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
  */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-       struct dma_async_tx_descriptor *dep = tx->next;
+       struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;
 
        if (!dep)
                return;
 
+       /* we'll submit tx->next now, so clear the link */
+       txd_clear_next(tx);
        chan = dep->chan;
 
        /* keep submitting up until a channel switch is detected
@@ -893,14 +1032,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
-               spin_lock_bh(&dep->lock);
-               dep->parent = NULL;
-               dep_next = dep->next;
+               txd_lock(dep);
+               txd_clear_parent(dep);
+               dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
-                       dep->next = NULL; /* ->next will be submitted */
+                       txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
-               spin_unlock_bh(&dep->lock);
+               txd_unlock(dep);
 
                dep->tx_submit(dep);
        }
@@ -911,9 +1050,8 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
 
 static int __init dma_bus_init(void)
 {
-       mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
 }
-subsys_initcall(dma_bus_init);
+arch_initcall(dma_bus_init);
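
The change to __dma_request_channel() above moves filter evaluation into
private_candidate(), so a candidate channel is only grabbed after the
caller's filter accepts it. A minimal client-side sketch of the resulting
interface follows; my_filter(), grab_memcpy_chan() and the dev_id match are
hypothetical, while dma_cap_zero(), dma_cap_set(), dma_request_channel()
and dma_release_channel() are the existing dmaengine entry points.

#include <linux/dmaengine.h>

/* illustrative filter: accept only channels from a given dma_device */
static bool my_filter(struct dma_chan *chan, void *param)
{
	return chan->device->dev_id == *(int *)param;
}

static struct dma_chan *grab_memcpy_chan(int dev_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* the filter is invoked synchronously, under dma_list_mutex */
	return dma_request_channel(mask, my_filter, &dev_id);
}

When the caller is finished it hands the channel back with
dma_release_channel(), which, per the hunk at -493 above, now also drops
the DMA_PRIVATE capability once the device's privatecnt falls to zero.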