dma_debug_coherent,
};
+enum map_err_types {
+ MAP_ERR_CHECK_NOT_APPLICABLE,
+ MAP_ERR_NOT_CHECKED,
+ MAP_ERR_CHECKED,
+};
+
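To make the three states concrete: an entry starts out NOT_CHECKED and becomes CHECKED once the driver consults dma_mapping_error(). A minimal, hypothetical driver-side sketch (example_map, buf and len are invented; the DMA API calls are the real ones this file instruments):

	#include <linux/dma-mapping.h>

	/* Hypothetical driver code exercising the state machine above. */
	static int example_map(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t addr;

		/* the new dma-debug entry starts as MAP_ERR_NOT_CHECKED */
		addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/* this check flips the entry to MAP_ERR_CHECKED */
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;

		/* unmapping a still-NOT_CHECKED entry now triggers a warning */
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return 0;
	}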
#define DMA_DEBUG_STACKTRACE_ENTRIES 5
struct dma_debug_entry {
int direction;
int sg_call_ents;
int sg_mapped_ents;
+ enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
struct stack_trace stacktrace;
unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
/* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly;
+/*
+ * The global_disable flag can be set on the kernel command line, and
+ * dma_debug_init() is called much later than that. Mappings can happen before
+ * the init function is called, and thus no memory has been reserved for the
+ * entries. This out-of-memory situation sets the global_disable flag, which
+ * can then never be cleared again. Use a separate flag for skipping the
+ * tracking if init hasn't been done yet.
+ */
+static bool initialized __read_mostly;
/* Global error count */
static u32 error_count;
static DEFINE_RWLOCK(driver_name_lock);
+static const char *const maperr2str[] = {
+ [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
+ [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
+ [MAP_ERR_CHECKED] = "dma map error checked",
+};
+
static const char *type2name[4] = { "single", "page",
"scather-gather", "coherent" };
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
+/* DMA statistics kept per device */
+struct dma_dev_info {
+ struct list_head list;
+ struct device *dev;
+ spinlock_t lock; /* Protects dma_dev_info itself */
+
+ int current_allocs;
+ int total_allocs;
+ int max_allocs;
+
+ int current_alloc_size;
+ int total_alloc_size;
+ int max_alloc_size;
+};
+
+static LIST_HEAD(dev_info_list);
+static DEFINE_SPINLOCK(dev_info_lock); /* Protects dev_info_list */
+
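As a worked example of the bookkeeping (hypothetical device mapping two 4096-byte buffers and then unmapping one):

	current_allocs = 1          total_allocs = 2          max_allocs = 2
	current_alloc_size = 4096   total_alloc_size = 8192   max_alloc_size = 8192

The current_* fields fall again on free; the total_* and max_* fields only ever grow.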
/*
* The access to some variables in this macro is racy. We can't use atomic_t
* here because all these variables are exported to debugfs. Some of them even
list_for_each_entry(entry, &bucket->list, list) {
if (!dev || dev == entry->dev) {
dev_info(entry->dev,
- "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
+ "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
type2name[entry->type], idx,
(unsigned long long)entry->paddr,
entry->dev_addr, entry->size,
- dir2name[entry->direction]);
+ dir2name[entry->direction],
+ maperr2str[entry->map_err_type]);
}
}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
+/*
+ * device info snapshot updating functions
+ */
+static void ____dev_info_incr(struct dma_dev_info *info,
+ struct dma_debug_entry *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ info->current_allocs++;
+ info->total_allocs++;
+ if (info->current_allocs > info->max_allocs)
+ info->max_allocs = info->current_allocs;
+
+ info->current_alloc_size += entry->size;
+ info->total_alloc_size += entry->size;
+ if (info->current_alloc_size > info->max_alloc_size)
+ info->max_alloc_size = info->current_alloc_size;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void ____dev_info_decr(struct dma_dev_info *info,
+ struct dma_debug_entry *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ info->current_allocs--;
+ info->current_alloc_size -= entry->size;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void __dev_info_fn(struct dma_debug_entry *entry,
+ void (*fn)(struct dma_dev_info *, struct dma_debug_entry *))
+{
+ struct dma_dev_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_info_lock, flags);
+
+ list_for_each_entry(info, &dev_info_list, list)
+ if (info->dev == entry->dev)
+ goto found;
+
+	/* atomic allocation: dev_info_lock is held with interrupts disabled */
+	info = kzalloc(sizeof(*info), GFP_ATOMIC);
+	if (!info) {
+		dev_err(entry->dev, "Out of memory at %s\n", __func__);
+		spin_unlock_irqrestore(&dev_info_lock, flags);
+		return;
+	}
+
+ spin_lock_init(&info->lock);
+ info->dev = entry->dev;
+ list_add(&info->list, &dev_info_list);
+found:
+ spin_unlock_irqrestore(&dev_info_lock, flags);
+ fn(info, entry);
+}
+
+static inline void dev_info_alloc(struct dma_debug_entry *entry)
+{
+ __dev_info_fn(entry, ____dev_info_incr);
+}
+
+static inline void dev_info_free(struct dma_debug_entry *entry)
+{
+ __dev_info_fn(entry, ____dev_info_decr);
+}
+
/*
* Wrapper function for adding an entry to the hash.
* This function takes care of locking itself.
bucket = get_hash_bucket(entry, &flags);
hash_bucket_add(bucket, entry);
put_hash_bucket(bucket, &flags);
+
+ dev_info_alloc(entry);
}
static struct dma_debug_entry *__dma_entry_alloc(void)
return count;
}
+static inline void seq_print_ip_sym(struct seq_file *s, unsigned long ip)
+{
+ seq_printf(s, "[<%p>] %pS\n", (void *)ip, (void *)ip);
+}
+
+#ifdef CONFIG_STACKTRACE
+void seq_print_trace(struct seq_file *s, struct stack_trace *trace)
+{
+	int i;
+
+	if (WARN_ON(!trace->entries))
+		return;
+
+	/* save_stack_trace() already consumed ->skip when the trace was saved */
+	for (i = 0; i < trace->nr_entries; i++)
+		seq_print_ip_sym(s, trace->entries[i]);
+}
+#endif /* CONFIG_STACKTRACE */
+
+/*
+ * Print all map entries in the order they are stored in the hash. We assume
+ * the user can sort and parse the output later if needed. Detailed output
+ * additionally includes the stack trace of each allocation.
+ */
+void seq_print_dma_mappings(struct seq_file *s, int detail)
+{
+ int idx;
+
+ for (idx = 0; idx < HASH_SIZE; idx++) {
+ struct hash_bucket *bucket = &dma_entry_hash[idx];
+ struct dma_debug_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bucket->lock, flags);
+
+ list_for_each_entry(entry, &bucket->list, list) {
+ seq_printf(s,
+ " %s %s idx %d P=%llx D=%llx L=%llx %s A=%s\n",
+ dev_name(entry->dev),
+ type2name[entry->type], idx,
+ (u64)entry->paddr,
+ entry->dev_addr, entry->size,
+ dir2name[entry->direction],
+ debug_dma_platformdata(entry->dev));
+
+#ifdef CONFIG_STACKTRACE
+			if (detail)
+				seq_print_trace(s, &entry->stacktrace);
+#endif
+ }
+
+ spin_unlock_irqrestore(&bucket->lock, flags);
+ }
+}
+
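For reference, a single mapping rendered by the format above might look like this; with detail set, each line is followed by its allocation trace. Device name, addresses, and symbols are invented:

	 fake-dev.0 page idx 42 P=2f4a1000 D=ffff0000 L=1000 DMA_TO_DEVICE A=none
	 [<c01f2a34>] example_map+0x24/0x58
	 [<c01f3b10>] example_probe+0x1c0/0x2f4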
+void __weak dma_debugfs_platform_info(struct dentry *dent)
+{
+}
+
+static int _dump_allocs(struct seq_file *s, void *data)
+{
+	int detail = (int)(unsigned long)s->private;
+
+ seq_print_dma_mappings(s, detail);
+ return 0;
+}
+
+static int _dump_dev_info(struct seq_file *s, void *data)
+{
+ struct dma_dev_info *i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_info_lock, flags);
+
+ list_for_each_entry(i, &dev_info_list, list)
+ seq_printf(s,
+ "dev=%s curallocs=%d totallocs=%d maxallocs=%d cursize=%d totsize=%d maxsize=%d\n",
+ dev_name(i->dev), i->current_allocs, i->total_allocs,
+ i->max_allocs, i->current_alloc_size,
+ i->total_alloc_size, i->max_alloc_size);
+
+ spin_unlock_irqrestore(&dev_info_lock, flags);
+ return 0;
+}
+
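The corresponding dump_dev_info output is one summary line per device; for the worked example earlier it would read (numbers invented):

	dev=fake-dev.0 curallocs=1 totallocs=2 maxallocs=2 cursize=4096 totsize=8192 maxsize=8192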
+#define DEFINE_DEBUGFS(__name, __func, __data) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __func, __data); \
+} \
+static const struct file_operations __name ## _fops = { \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+DEFINE_DEBUGFS(_dump_allocs, _dump_allocs, NULL);
+DEFINE_DEBUGFS(_dump_allocs_detail, _dump_allocs, (void *)1);
+DEFINE_DEBUGFS(_dump_dev_info, _dump_dev_info, NULL);
+#undef DEFINE_DEBUGFS
+
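For clarity, the first DEFINE_DEBUGFS invocation above expands (modulo whitespace) to:

	static int _dump_allocs_open(struct inode *inode, struct file *file)
	{
		return single_open(file, _dump_allocs, NULL);
	}
	static const struct file_operations _dump_allocs_fops = {
		.open    = _dump_allocs_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

The other two invocations differ only in the symbol names and in the data pointer handed to single_open(), which seq_file later exposes as s->private.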
+static int map_dump_debug_fs_init(void)
+{
+#define CREATE_FILE(name) \
+ debugfs_create_file(#name, S_IRUGO, \
+ dma_debug_dent, NULL, \
+ &_##name##_fops)
+
+ if (!CREATE_FILE(dump_allocs))
+ return -ENOMEM;
+
+ if (!CREATE_FILE(dump_allocs_detail))
+ return -ENOMEM;
+
+ if (!CREATE_FILE(dump_dev_info))
+ return -ENOMEM;
+
+#undef CREATE_FILE
+
+ dma_debugfs_platform_info(dma_debug_dent);
+ return 0;
+}
+
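Assuming dma_debug_dent is the usual dma-api debugfs directory, the new read-only files end up at (paths depend on where debugfs is mounted):

	/sys/kernel/debug/dma-api/dump_allocs          one line per live mapping
	/sys/kernel/debug/dma-api/dump_allocs_detail   same, plus allocation stack traces
	/sys/kernel/debug/dma-api/dump_dev_info        one summary line per device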
static const struct file_operations filter_fops = {
.read = filter_read,
.write = filter_write,
if (!filter_dent)
goto out_err;
+ if (map_dump_debug_fs_init())
+ goto out_err;
+
return 0;
out_err:
struct dma_debug_entry *uninitialized_var(entry);
int count;
- if (global_disable)
+ if (!initialized || global_disable)
return 0;
switch (action) {
{
struct notifier_block *nb;
- if (global_disable)
+ if (!initialized || global_disable)
return;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
nr_total_entries = num_free_entries;
pr_info("DMA-API: debugging enabled by kernel config\n");
+ initialized = true;
}
static __init int dma_debug_cmdline(char *str)
struct hash_bucket *bucket;
unsigned long flags;
- if (dma_mapping_error(ref->dev, ref->dev_addr)) {
- err_printk(ref->dev, NULL, "DMA-API: device driver tries "
- "to free an invalid DMA memory address\n");
- return;
- }
-
bucket = get_hash_bucket(ref, &flags);
entry = bucket_find_exact(bucket, ref);
if (!entry) {
- err_printk(ref->dev, NULL, "DMA-API: device driver tries "
- "to free DMA memory it has not allocated "
- "[device address=0x%016llx] [size=%llu bytes]\n",
- ref->dev_addr, ref->size);
- goto out;
+		/*
+		 * Must drop the bucket lock before calling dma_mapping_error():
+		 * it recurses into debug_dma_mapping_error(), which takes the
+		 * same hash bucket lock.
+		 */
+ put_hash_bucket(bucket, &flags);
+
+ if (dma_mapping_error(ref->dev, ref->dev_addr)) {
+ err_printk(ref->dev, NULL,
+ "DMA-API: device driver tries to free an "
+ "invalid DMA memory address\n");
+ } else {
+ err_printk(ref->dev, NULL,
+ "DMA-API: device driver tries to free DMA "
+ "memory it has not allocated [device "
+ "address=0x%016llx] [size=%llu bytes]\n",
+ ref->dev_addr, ref->size);
+ }
+ return;
}
if (ref->size != entry->size) {
dir2name[ref->direction]);
}
+ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+ err_printk(ref->dev, entry,
+ "DMA-API: device driver failed to check map error"
+ "[device address=0x%016llx] [size=%llu bytes] "
+ "[mapped as %s]",
+ ref->dev_addr, ref->size,
+ type2name[entry->type]);
+ }
+
+ dev_info_free(entry);
+
hash_bucket_del(entry);
dma_entry_free(entry);
-out:
put_hash_bucket(bucket, &flags);
}
{
struct dma_debug_entry *entry;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
- if (unlikely(dma_mapping_error(dev, dma_addr)))
+ if (dma_mapping_error(dev, dma_addr))
return;
entry = dma_entry_alloc();
entry->dev_addr = dma_addr;
entry->size = size;
entry->direction = direction;
+ entry->map_err_type = MAP_ERR_NOT_CHECKED;
if (map_single)
entry->type = dma_debug_single;
}
EXPORT_SYMBOL(debug_dma_map_page);
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_debug_entry ref;
+ struct dma_debug_entry *entry;
+ struct hash_bucket *bucket;
+ unsigned long flags;
+
+ if (unlikely(!initialized || global_disable))
+ return;
+
+ ref.dev = dev;
+ ref.dev_addr = dma_addr;
+ bucket = get_hash_bucket(&ref, &flags);
+
+ list_for_each_entry(entry, &bucket->list, list) {
+ if (!exact_match(&ref, entry))
+ continue;
+
+ /*
+ * The same physical address can be mapped multiple
+ * times. Without a hardware IOMMU this results in the
+ * same device addresses being put into the dma-debug
+ * hash multiple times too. This can result in false
+ * positives being reported. Therefore we implement a
+ * best-fit algorithm here which updates the first entry
+ * from the hash which fits the reference value and is
+ * not currently listed as being checked.
+ */
+ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+ entry->map_err_type = MAP_ERR_CHECKED;
+ break;
+ }
+ }
+
+ put_hash_bucket(bucket, &flags);
+}
+EXPORT_SYMBOL(debug_dma_mapping_error);
+
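debug_dma_mapping_error() only ever fires if dma_mapping_error() is wired to call it, and that wiring lives in the per-arch dma-mapping headers rather than in this file. A sketch of what such a header-side hook might look like; the ops layout and the DMA_ERROR_CODE sentinel are arch-specific assumptions, not part of this patch:

	/* Sketch only: the real hook is arch-specific. */
	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		debug_dma_mapping_error(dev, dma_addr);	/* marks the entry CHECKED */
		if (ops->mapping_error)
			return ops->mapping_error(dev, dma_addr);
		return dma_addr == DMA_ERROR_CODE;	/* arch error sentinel */
	}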
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction, bool map_single)
{
.direction = direction,
};
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (map_single)
struct scatterlist *s;
int i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, mapped_ents, i) {
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sglist, s, nelems, i) {
{
struct dma_debug_entry *entry;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (unlikely(virt == NULL))
.direction = DMA_BIDIRECTIONAL,
};
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
check_unmap(&ref);
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, nelems, i) {
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, nelems, i) {