diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0..d87a17a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
@@ -44,6 +45,12 @@ enum {
        dma_debug_coherent,
 };
 
+enum map_err_types {
+       MAP_ERR_CHECK_NOT_APPLICABLE,
+       MAP_ERR_NOT_CHECKED,
+       MAP_ERR_CHECKED,
+};
+
 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
 
 struct dma_debug_entry {
@@ -56,12 +63,15 @@ struct dma_debug_entry {
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
+       enum map_err_types  map_err_type;
 #ifdef CONFIG_STACKTRACE
        struct           stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
 };
 
+typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
+
 struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
@@ -75,7 +85,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
 
 /* Global error count */
 static u32 error_count;
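
The widening of global_disable from bool to u32 above exists because debugfs_create_bool() in this kernel series stores its value through a u32 pointer (the explicit cast is dropped in dma_debug_fs_init() further down). A minimal sketch of publishing such a flag with that era's API, using hypothetical names:

	#include <linux/debugfs.h>
	#include <linux/init.h>

	static u32 my_disable_flag;	/* must be u32 for 3.x debugfs_create_bool() */
	static struct dentry *my_dir;

	static int __init my_debugfs_init(void)
	{
		my_dir = debugfs_create_dir("my-feature", NULL);
		if (!my_dir)
			return -ENOMEM;

		/* debugfs_create_bool() here takes a u32 *, not a bool * */
		if (!debugfs_create_bool("disabled", 0444, my_dir, &my_disable_flag))
			return -ENOMEM;

		return 0;
	}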
@@ -111,17 +121,18 @@ static struct device_driver *current_driver                    __read_mostly;
 
 static DEFINE_RWLOCK(driver_name_lock);
 
+static const char *const maperr2str[] = {
+       [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
+       [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
+       [MAP_ERR_CHECKED] = "dma map error checked",
+};
+
 static const char *type2name[4] = { "single", "page",
                                    "scather-gather", "coherent" };
 
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };
 
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -156,14 +167,18 @@ static bool driver_filter(struct device *dev)
                return true;
 
        /* driver filter on and initialized */
-       if (current_driver && dev->driver == current_driver)
+       if (current_driver && dev && dev->driver == current_driver)
                return true;
 
+       /* driver filter on, but we can't filter on a NULL device... */
+       if (!dev)
+               return false;
+
        if (current_driver || !current_driver_name[0])
                return false;
 
        /* driver filter on but not yet initialized */
-       drv = get_driver(dev->driver);
+       drv = dev->driver;
        if (!drv)
                return false;
 
@@ -178,22 +193,21 @@ static bool driver_filter(struct device *dev)
        }
 
        read_unlock_irqrestore(&driver_name_lock, flags);
-       put_driver(drv);
 
        return ret;
 }
 
-#define err_printk(dev, entry, format, arg...) do {            \
-               error_count += 1;                               \
-               if (driver_filter(dev) &&                       \
-                   (show_all_errors || show_num_errors > 0)) { \
-                       WARN(1, "%s %s: " format,               \
-                            dev_driver_string(dev),            \
-                            dev_name(dev) , ## arg);           \
-                       dump_entry_trace(entry);                \
-               }                                               \
-               if (!show_all_errors && show_num_errors > 0)    \
-                       show_num_errors -= 1;                   \
+#define err_printk(dev, entry, format, arg...) do {                    \
+               error_count += 1;                                       \
+               if (driver_filter(dev) &&                               \
+                   (show_all_errors || show_num_errors > 0)) {         \
+                       WARN(1, "%s %s: " format,                       \
+                            dev ? dev_driver_string(dev) : "NULL",     \
+                            dev ? dev_name(dev) : "NULL", ## arg);     \
+                       dump_entry_trace(entry);                        \
+               }                                                       \
+               if (!show_all_errors && show_num_errors > 0)            \
+                       show_num_errors -= 1;                           \
        } while (0);
 
 /*
@@ -236,18 +250,37 @@ static void put_hash_bucket(struct hash_bucket *bucket,
        spin_unlock_irqrestore(&bucket->lock, __flags);
 }
 
+static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
+{
+       return ((a->dev_addr == b->dev_addr) &&
+               (a->dev == b->dev)) ? true : false;
+}
+
+static bool containing_match(struct dma_debug_entry *a,
+                            struct dma_debug_entry *b)
+{
+       if (a->dev != b->dev)
+               return false;
+
+       if ((b->dev_addr <= a->dev_addr) &&
+           ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
+               return true;
+
+       return false;
+}
+
 /*
  * Search a given entry in the hash bucket list
  */
-static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
-                                               struct dma_debug_entry *ref)
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+                                                 struct dma_debug_entry *ref,
+                                                 match_fn match)
 {
        struct dma_debug_entry *entry, *ret = NULL;
-       int matches = 0, match_lvl, last_lvl = 0;
+       int matches = 0, match_lvl, last_lvl = -1;
 
        list_for_each_entry(entry, &bucket->list, list) {
-               if ((entry->dev_addr != ref->dev_addr) ||
-                   (entry->dev != ref->dev))
+               if (!match(ref, entry))
                        continue;
 
                /*
@@ -255,24 +288,25 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
-                * positives being reported. Therfore we implement a
+                * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
-               entry->size      == ref->size      ? ++match_lvl : match_lvl;
-               entry->type      == ref->type      ? ++match_lvl : match_lvl;
-               entry->direction == ref->direction ? ++match_lvl : match_lvl;
+               entry->size         == ref->size         ? ++match_lvl : 0;
+               entry->type         == ref->type         ? ++match_lvl : 0;
+               entry->direction    == ref->direction    ? ++match_lvl : 0;
+               entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
 
-               if (match_lvl == 3) {
+               if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better then the
-                        * previous one
+                        * previous one or it is the 1st match.
                         */
                        last_lvl = match_lvl;
                        ret      = entry;
@@ -288,6 +322,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
        return ret;
 }
 
+static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
+                                                struct dma_debug_entry *ref)
+{
+       return __hash_bucket_find(bucket, ref, exact_match);
+}
+
+static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
+                                                  struct dma_debug_entry *ref,
+                                                  unsigned long *flags)
+{
+
+       unsigned int max_range = dma_get_max_seg_size(ref->dev);
+       struct dma_debug_entry *entry, index = *ref;
+       unsigned int range = 0;
+
+       while (range <= max_range) {
+               entry = __hash_bucket_find(*bucket, &index, containing_match);
+
+               if (entry)
+                       return entry;
+
+               /*
+                * Nothing found, go back a hash bucket
+                */
+               put_hash_bucket(*bucket, flags);
+               range          += (1 << HASH_FN_SHIFT);
+               index.dev_addr -= (1 << HASH_FN_SHIFT);
+               *bucket = get_hash_bucket(&index, flags);
+       }
+
+       return NULL;
+}
+
 /*
  * Add an entry to a hash bucket
  */
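
The split into exact_match() and containing_match() above (used via bucket_find_exact() and bucket_find_contain()) exists because some drivers sync at an offset into a mapping by passing dev_addr plus an offset back into the DMA API; the resulting reference matches no stored dev_addr exactly, but it is fully contained in one entry. A minimal, hypothetical driver sketch of the pattern the containing match has to cope with:

	#include <linux/dma-mapping.h>

	/* Hypothetical RX path: the whole buffer is mapped once, but only the
	 * region the device actually wrote is synced, at an offset into the
	 * mapping. dma-debug then sees dev_addr = handle + 64, size = 128,
	 * which only a containing match can resolve to the stored entry.
	 */
	static int rx_sync_example(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		dma_sync_single_for_cpu(dev, handle + 64, 128, DMA_FROM_DEVICE);
		/* ... consume the data ... */

		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}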
@@ -322,11 +389,12 @@ void debug_dma_dump_mappings(struct device *dev)
                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
-                                        "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
+                                        "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
-                                        dir2name[entry->direction]);
+                                        dir2name[entry->direction],
+                                        maperr2str[entry->map_err_type]);
                        }
                }
 
@@ -371,7 +439,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
-       struct dma_debug_entry *entry = NULL;
+       struct dma_debug_entry *entry;
        unsigned long flags;
 
        spin_lock_irqsave(&free_entries_lock, flags);
@@ -379,11 +447,14 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        if (list_empty(&free_entries)) {
                pr_err("DMA-API: debugging out of memory - disabling\n");
                global_disable = true;
-               goto out;
+               spin_unlock_irqrestore(&free_entries_lock, flags);
+               return NULL;
        }
 
        entry = __dma_entry_alloc();
 
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
 #ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
@@ -391,9 +462,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        save_stack_trace(&entry->stacktrace);
 #endif
 
-out:
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-
        return entry;
 }
 
@@ -565,7 +633,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
-       for (i = 0; i < NAME_MAX_LEN; ++i) {
+       for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
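
Bounding the loop above at NAME_MAX_LEN - 1 leaves room for the NUL terminator stored just after the loop; with the old bound, a token of exactly NAME_MAX_LEN bytes would have written the terminator one byte past current_driver_name. The same pattern in isolation, with hypothetical names and buffer size:

	#define TOKEN_MAX_LEN 64	/* illustrative, not the kernel's NAME_MAX_LEN */

	/* Copy one whitespace/NUL-terminated token, always reserving the last
	 * byte of dst for the terminator written after the loop.
	 */
	static void copy_token(char dst[TOKEN_MAX_LEN], const char *src)
	{
		int i;

		for (i = 0; i < TOKEN_MAX_LEN - 1; ++i) {
			dst[i] = src[i];
			if (src[i] == ' ' || src[i] == '\t' || src[i] == '\0')
				break;
		}
		dst[i] = '\0';	/* i <= TOKEN_MAX_LEN - 1, so this stays in bounds */
	}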
@@ -582,9 +650,10 @@ out_unlock:
        return count;
 }
 
-const struct file_operations filter_fops = {
+static const struct file_operations filter_fops = {
        .read  = filter_read,
        .write = filter_write,
+       .llseek = default_llseek,
 };
 
 static int dma_debug_fs_init(void)
@@ -597,7 +666,7 @@ static int dma_debug_fs_init(void)
 
        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
-                       (u32 *)&global_disable);
+                       &global_disable);
        if (!global_disable_dent)
                goto out_err;
 
@@ -643,7 +712,7 @@ out_err:
        return -ENOMEM;
 }
 
-static int device_dma_allocations(struct device *dev)
+static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
 {
        struct dma_debug_entry *entry;
        unsigned long flags;
@@ -654,8 +723,10 @@ static int device_dma_allocations(struct device *dev)
        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock(&dma_entry_hash[i].lock);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
-                       if (entry->dev == dev)
+                       if (entry->dev == dev) {
                                count += 1;
+                               *out_entry = entry;
+                       }
                }
                spin_unlock(&dma_entry_hash[i].lock);
        }
@@ -665,21 +736,28 @@ static int device_dma_allocations(struct device *dev)
        return count;
 }
 
-static int dma_debug_device_change(struct notifier_block *nb,
-                                   unsigned long action, void *data)
+static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 {
        struct device *dev = data;
+       struct dma_debug_entry *uninitialized_var(entry);
        int count;
 
+       if (global_disable)
+               return 0;
 
        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
-               count = device_dma_allocations(dev);
+               count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
-               err_printk(dev, NULL, "DMA-API: device driver has pending "
+               err_printk(dev, entry, "DMA-API: device driver has pending "
                                "DMA allocations while released from device "
-                               "[count=%d]\n", count);
+                               "[count=%d]\n"
+                               "One of leaked entries details: "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [mapped as %s]\n",
+                       count, entry->dev_addr, entry->size,
+                       dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
@@ -692,6 +770,9 @@ void dma_debug_add_bus(struct bus_type *bus)
 {
        struct notifier_block *nb;
 
+       if (global_disable)
+               return;
+
        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
@@ -715,7 +796,7 @@ void dma_debug_init(u32 num_entries)
 
        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
-               dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+               spin_lock_init(&dma_entry_hash[i].lock);
        }
 
        if (dma_debug_fs_init() != 0) {
@@ -777,21 +858,25 @@ static void check_unmap(struct dma_debug_entry *ref)
        struct hash_bucket *bucket;
        unsigned long flags;
 
-       if (dma_mapping_error(ref->dev, ref->dev_addr)) {
-               err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-                          "to free an invalid DMA memory address\n");
-               return;
-       }
-
        bucket = get_hash_bucket(ref, &flags);
-       entry = hash_bucket_find(bucket, ref);
+       entry = bucket_find_exact(bucket, ref);
 
        if (!entry) {
-               err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-                          "to free DMA memory it has not allocated "
-                          "[device address=0x%016llx] [size=%llu bytes]\n",
-                          ref->dev_addr, ref->size);
-               goto out;
+               /* must drop lock before calling dma_mapping_error */
+               put_hash_bucket(bucket, &flags);
+
+               if (dma_mapping_error(ref->dev, ref->dev_addr)) {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free an "
+                                  "invalid DMA memory address\n");
+               } else {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free DMA "
+                                  "memory it has not allocated [device "
+                                  "address=0x%016llx] [size=%llu bytes]\n",
+                                  ref->dev_addr, ref->size);
+               }
+               return;
        }
 
        if (ref->size != entry->size) {
@@ -814,9 +899,11 @@ static void check_unmap(struct dma_debug_entry *ref)
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
-                          "[cpu alloc address=%p] [cpu free address=%p]",
+                          "[cpu alloc address=0x%016llx] "
+                          "[cpu free address=0x%016llx]",
                           ref->dev_addr, ref->size,
-                          (void *)entry->paddr, (void *)ref->paddr);
+                          (unsigned long long)entry->paddr,
+                          (unsigned long long)ref->paddr);
        }
 
        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -841,10 +928,18 @@ static void check_unmap(struct dma_debug_entry *ref)
                           dir2name[ref->direction]);
        }
 
+       if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+               err_printk(ref->dev, entry,
+                          "DMA-API: device driver failed to check map error "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped as %s]",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type]);
+       }
+
        hash_bucket_del(entry);
        dma_entry_free(entry);
 
-out:
        put_hash_bucket(bucket, &flags);
 }
 
@@ -855,94 +950,88 @@ static void check_for_stack(struct device *dev, void *addr)
                                "stack [addr=%p]\n", addr);
 }
 
-static inline bool overlap(void *addr, u64 size, void *start, void *end)
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
 {
-       void *addr2 = (char *)addr + size;
+       unsigned long a1 = (unsigned long)addr;
+       unsigned long b1 = a1 + len;
+       unsigned long a2 = (unsigned long)start;
+       unsigned long b2 = (unsigned long)end;
 
-       return ((addr >= start && addr < end) ||
-               (addr2 >= start && addr2 < end) ||
-               ((addr < start) && (addr2 >= end)));
+       return !(b1 <= a2 || a1 >= b2);
 }
 
-static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-       if (overlap(addr, size, _text, _etext) ||
-           overlap(addr, size, __start_rodata, __end_rodata))
-               err_printk(dev, NULL, "DMA-API: device driver maps "
-                               "memory from kernel text or rodata "
-                               "[addr=%p] [size=%llu]\n", addr, size);
+       if (overlap(addr, len, _text, _etext) ||
+           overlap(addr, len, __start_rodata, __end_rodata))
+               err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-                      u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+                      struct dma_debug_entry *ref,
+                      bool to_cpu)
 {
-       struct dma_debug_entry ref = {
-               .dev            = dev,
-               .dev_addr       = addr,
-               .size           = size,
-               .direction      = direction,
-       };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
 
-       bucket = get_hash_bucket(&ref, &flags);
+       bucket = get_hash_bucket(ref, &flags);
 
-       entry = hash_bucket_find(bucket, &ref);
+       entry = bucket_find_contain(&bucket, ref, &flags);
 
        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                                "to sync DMA memory it has not allocated "
                                "[device address=0x%016llx] [size=%llu bytes]\n",
-                               (unsigned long long)addr, size);
+                               (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }
 
-       if ((offset + size) > entry->size) {
+       if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
-                               "[allocation size=%llu bytes] [sync offset=%llu] "
-                               "[sync size=%llu]\n", entry->dev_addr, entry->size,
-                               offset, size);
+                               "[allocation size=%llu bytes] "
+                               "[sync offset+size=%llu]\n",
+                               entry->dev_addr, entry->size,
+                               ref->size);
        }
 
-       if (direction != entry->direction) {
+       if (entry->direction == DMA_BIDIRECTIONAL)
+               goto out;
+
+       if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "DMA memory with different direction "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)addr, entry->size,
+                               (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
-                               dir2name[direction]);
+                               dir2name[ref->direction]);
        }
 
-       if (entry->direction == DMA_BIDIRECTIONAL)
-               goto out;
-
        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-                     !(direction == DMA_TO_DEVICE))
+                     !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device read-only DMA memory for cpu "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)addr, entry->size,
+                               (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
-                               dir2name[direction]);
+                               dir2name[ref->direction]);
 
        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-                      !(direction == DMA_FROM_DEVICE))
+                      !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device write-only DMA memory to device "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)addr, entry->size,
+                               (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
-                               dir2name[direction]);
+                               dir2name[ref->direction]);
 
 out:
        put_hash_bucket(bucket, &flags);
-
 }
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
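
The rewritten overlap() above reduces to a plain intersection test on half-open intervals, computed on unsigned long values rather than through chained pointer comparisons: [a1, b1) and [a2, b2) overlap iff !(b1 <= a2 || a1 >= b2). A standalone sketch of the same predicate with illustrative values (not taken from any real mapping):

	#include <stdbool.h>
	#include <stdio.h>

	/* Same predicate as the new overlap(): half-open interval intersection. */
	static bool ranges_overlap(unsigned long a1, unsigned long b1,
				   unsigned long a2, unsigned long b2)
	{
		return !(b1 <= a2 || a1 >= b2);
	}

	int main(void)
	{
		/* Buffer that fully surrounds the section: overlap. */
		printf("%d\n", ranges_overlap(0x1000, 0x9000, 0x2000, 0x3000)); /* 1 */
		/* Buffer that ends exactly where the section begins: no overlap. */
		printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000)); /* 0 */
		return 0;
	}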
@@ -954,7 +1043,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
        if (unlikely(global_disable))
                return;
 
-       if (unlikely(dma_mapping_error(dev, dma_addr)))
+       if (dma_mapping_error(dev, dma_addr))
                return;
 
        entry = dma_entry_alloc();
@@ -967,12 +1056,14 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;
+       entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
        if (map_single)
                entry->type = dma_debug_single;
 
        if (!PageHighMem(page)) {
-               void *addr = ((char *)page_address(page)) + offset;
+               void *addr = page_address(page) + offset;
+
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }
@@ -981,6 +1072,44 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 }
 EXPORT_SYMBOL(debug_dma_map_page);
 
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       struct dma_debug_entry ref;
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       if (unlikely(global_disable))
+               return;
+
+       ref.dev = dev;
+       ref.dev_addr = dma_addr;
+       bucket = get_hash_bucket(&ref, &flags);
+
+       list_for_each_entry(entry, &bucket->list, list) {
+               if (!exact_match(&ref, entry))
+                       continue;
+
+               /*
+                * The same physical address can be mapped multiple
+                * times. Without a hardware IOMMU this results in the
+                * same device addresses being put into the dma-debug
+                * hash multiple times too. This can result in false
+                * positives being reported. Therefore we implement a
+                * best-fit algorithm here which updates the first entry
+                * from the hash which fits the reference value and is
+                * not currently listed as being checked.
+                */
+               if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+                       entry->map_err_type = MAP_ERR_CHECKED;
+                       break;
+               }
+       }
+
+       put_hash_bucket(bucket, &flags);
+}
+EXPORT_SYMBOL(debug_dma_mapping_error);
+
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
 {
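
Together with the MAP_ERR_NOT_CHECKED default set in debug_dma_map_page() and the warning added to check_unmap(), debug_dma_mapping_error() closes the loop: a driver's dma_mapping_error() call marks the entry MAP_ERR_CHECKED, and mappings that are freed without ever being checked are reported. A minimal sketch of the driver-side pattern this is meant to enforce, with hypothetical names:

	#include <linux/dma-mapping.h>

	/* Hypothetical TX path: every mapping is followed by dma_mapping_error(),
	 * which (through the DMA API debug hooks) flips the entry to
	 * MAP_ERR_CHECKED and keeps check_unmap() quiet.
	 */
	static int tx_map_example(struct device *dev, void *data, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))	/* mandatory status check */
			return -ENOMEM;

		/* ... hand the handle to the hardware, unmap on completion ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}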
@@ -1036,19 +1165,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+                                struct dma_debug_entry *ref)
 {
-       struct dma_debug_entry *entry, ref;
+       struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
        int mapped_ents;
 
-       ref.dev      = dev;
-       ref.dev_addr = sg_dma_address(s);
-       ref.size     = sg_dma_len(s),
-
-       bucket       = get_hash_bucket(&ref, &flags);
-       entry        = hash_bucket_find(bucket, &ref);
+       bucket       = get_hash_bucket(ref, &flags);
+       entry        = bucket_find_exact(bucket, ref);
        mapped_ents  = 0;
 
        if (entry)
@@ -1076,16 +1202,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = dir,
-                       .sg_call_ents   = 0,
+                       .sg_call_ents   = nelems,
                };
 
                if (mapped_ents && i >= mapped_ents)
                        break;
 
-               if (!i) {
-                       ref.sg_call_ents = nelems;
-                       mapped_ents = get_nr_mapped_entries(dev, s);
-               }
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
 
                check_unmap(&ref);
        }
@@ -1140,10 +1264,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
 {
+       struct dma_debug_entry ref;
+
        if (unlikely(global_disable))
                return;
 
-       check_sync(dev, dma_handle, size, 0, direction, true);
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -1151,10 +1284,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
 {
+       struct dma_debug_entry ref;
+
        if (unlikely(global_disable))
                return;
 
-       check_sync(dev, dma_handle, size, 0, direction, false);
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -1163,10 +1305,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         unsigned long offset, size_t size,
                                         int direction)
 {
+       struct dma_debug_entry ref;
+
        if (unlikely(global_disable))
                return;
 
-       check_sync(dev, dma_handle, size, offset, direction, true);
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = offset + size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -1175,10 +1326,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
                                            unsigned long offset,
                                            size_t size, int direction)
 {
+       struct dma_debug_entry ref;
+
        if (unlikely(global_disable))
                return;
 
-       check_sync(dev, dma_handle, size, offset, direction, false);
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = offset + size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
@@ -1192,14 +1352,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                return;
 
        for_each_sg(sg, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .paddr          = sg_phys(s),
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+                       .sg_call_ents   = nelems,
+               };
+
                if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, s);
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
 
                if (i >= mapped_ents)
                        break;
 
-               check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-                          direction, true);
+               check_sync(dev, &ref, true);
        }
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1384,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                return;
 
        for_each_sg(sg, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .paddr          = sg_phys(s),
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+                       .sg_call_ents   = nelems,
+               };
                if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, s);
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
 
                if (i >= mapped_ents)
                        break;
 
-               check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-                          direction, false);
+               check_sync(dev, &ref, false);
        }
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);