lib/dma-debug.c
1 /*
2  * Copyright (C) 2008 Advanced Micro Devices, Inc.
3  *
4  * Author: Joerg Roedel <joerg.roedel@amd.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18  */
19
20 #include <linux/scatterlist.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/stacktrace.h>
23 #include <linux/dma-debug.h>
24 #include <linux/spinlock.h>
25 #include <linux/debugfs.h>
26 #include <linux/uaccess.h>
27 #include <linux/export.h>
28 #include <linux/device.h>
29 #include <linux/types.h>
30 #include <linux/sched.h>
31 #include <linux/ctype.h>
32 #include <linux/list.h>
33 #include <linux/slab.h>
34
35 #include <asm/sections.h>
36
37 #define HASH_SIZE       1024ULL
38 #define HASH_FN_SHIFT   13
39 #define HASH_FN_MASK    (HASH_SIZE - 1)
40
41 enum {
42         dma_debug_single,
43         dma_debug_page,
44         dma_debug_sg,
45         dma_debug_coherent,
46 };
47
48 enum map_err_types {
49         MAP_ERR_CHECK_NOT_APPLICABLE,
50         MAP_ERR_NOT_CHECKED,
51         MAP_ERR_CHECKED,
52 };
53
54 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
55
56 struct dma_debug_entry {
57         struct list_head list;
58         struct device    *dev;
59         int              type;
60         phys_addr_t      paddr;
61         u64              dev_addr;
62         u64              size;
63         int              direction;
64         int              sg_call_ents;
65         int              sg_mapped_ents;
66         enum map_err_types  map_err_type;
67 #ifdef CONFIG_STACKTRACE
68         struct           stack_trace stacktrace;
69         unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
70 #endif
71 };
72
73 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
74
75 struct hash_bucket {
76         struct list_head list;
77         spinlock_t lock;
78 } ____cacheline_aligned_in_smp;
79
80 /* Hash list to save the allocated dma addresses */
81 static struct hash_bucket dma_entry_hash[HASH_SIZE];
82 /* List of pre-allocated dma_debug_entry's */
83 static LIST_HEAD(free_entries);
84 /* Lock for the list above */
85 static DEFINE_SPINLOCK(free_entries_lock);
86
87 /* Global disable flag - will be set in case of an error */
88 static u32 global_disable __read_mostly;
89 /*
90  * The global_disable flag can be set in kernel command line, and
91  * dma_debug_init() is called much later than that. Mappings can happen before
92  * the init function is called, and thus no memory has been reserved for the
93  * entries. This out-of-memory situation would set the global_disable flag,
94  * which could then never be cleared again. Use a separate flag to skip the
95  * tracking if init hasn't been done yet.
96  */
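/*
 * Note: global_disable can also be set on the kernel command line via
 * "dma_debug=off", and the preallocation count via "dma_debug_entries=<n>";
 * see the __setup() handlers near the end of this file.
 */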
97 static bool initialized __read_mostly;
98
99 /* Global error count */
100 static u32 error_count;
101
102 /* Global error show enable */
103 static u32 show_all_errors __read_mostly;
104 /* Number of errors to show */
105 static u32 show_num_errors = 1;
106
107 static u32 num_free_entries;
108 static u32 min_free_entries;
109 static u32 nr_total_entries;
110
111 /* number of preallocated entries requested by kernel cmdline */
112 static u32 req_entries;
113
114 /* debugfs dentries for the variables above */
115 static struct dentry *dma_debug_dent        __read_mostly;
116 static struct dentry *global_disable_dent   __read_mostly;
117 static struct dentry *error_count_dent      __read_mostly;
118 static struct dentry *show_all_errors_dent  __read_mostly;
119 static struct dentry *show_num_errors_dent  __read_mostly;
120 static struct dentry *num_free_entries_dent __read_mostly;
121 static struct dentry *min_free_entries_dent __read_mostly;
122 static struct dentry *filter_dent           __read_mostly;
123
124 /* per-driver filter related state */
125
126 #define NAME_MAX_LEN    64
127
128 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
129 static struct device_driver *current_driver                    __read_mostly;
130
131 static DEFINE_RWLOCK(driver_name_lock);
132
133 static const char *const maperr2str[] = {
134         [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
135         [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
136         [MAP_ERR_CHECKED] = "dma map error checked",
137 };
138
139 static const char *type2name[4] = { "single", "page",
140                                     "scatter-gather", "coherent" };
141
142 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
143                                    "DMA_FROM_DEVICE", "DMA_NONE" };
144
145 /* dma statistics per device */
146 struct dma_dev_info {
147         struct list_head list;
148         struct device *dev;
149         spinlock_t lock;        /* Protects dma_dev_info itself */
150
151         int current_allocs;
152         int total_allocs;
153         int max_allocs;
154
155         int current_alloc_size;
156         int total_alloc_size;
157         int max_alloc_size;
158 };
159
160 static LIST_HEAD(dev_info_list);
161 static DEFINE_SPINLOCK(dev_info_lock); /* Protects dev_info_list */
162
163 /*
164  * The access to some variables in this macro is racy. We can't use atomic_t
165  * here because all these variables are exported to debugfs and some of them
166  * are even writable. This is also why a lock won't help much. But anyway,
167  * the races are no big deal. Here is why:
168  *
169  *   error_count: the addition is racy, but the worst thing that can happen is
170  *                that we don't count some errors
171  *   show_num_errors: the subtraction is racy. Also no big deal because in
172  *                    worst case this will result in one warning more in the
173  *                    system log than the user configured. This variable is
174  *                    writeable via debugfs.
175  */
176 static inline void dump_entry_trace(struct dma_debug_entry *entry)
177 {
178 #ifdef CONFIG_STACKTRACE
179         if (entry) {
180                 pr_warning("Mapped at:\n");
181                 print_stack_trace(&entry->stacktrace, 0);
182         }
183 #endif
184 }
185
186 static bool driver_filter(struct device *dev)
187 {
188         struct device_driver *drv;
189         unsigned long flags;
190         bool ret;
191
192         /* driver filter off */
193         if (likely(!current_driver_name[0]))
194                 return true;
195
196         /* driver filter on and initialized */
197         if (current_driver && dev && dev->driver == current_driver)
198                 return true;
199
200         /* driver filter on, but we can't filter on a NULL device... */
201         if (!dev)
202                 return false;
203
204         if (current_driver || !current_driver_name[0])
205                 return false;
206
207         /* driver filter on but not yet initialized */
208         drv = dev->driver;
209         if (!drv)
210                 return false;
211
212         /* lock to protect against change of current_driver_name */
213         read_lock_irqsave(&driver_name_lock, flags);
214
215         ret = false;
216         if (drv->name &&
217             strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
218                 current_driver = drv;
219                 ret = true;
220         }
221
222         read_unlock_irqrestore(&driver_name_lock, flags);
223
224         return ret;
225 }
226
227 #define err_printk(dev, entry, format, arg...) do {                     \
228                 error_count += 1;                                       \
229                 if (driver_filter(dev) &&                               \
230                     (show_all_errors || show_num_errors > 0)) {         \
231                         WARN(1, "%s %s: " format,                       \
232                              dev ? dev_driver_string(dev) : "NULL",     \
233                              dev ? dev_name(dev) : "NULL", ## arg);     \
234                         dump_entry_trace(entry);                        \
235                 }                                                       \
236                 if (!show_all_errors && show_num_errors > 0)            \
237                         show_num_errors -= 1;                           \
238         } while (0)
239
240 /*
241  * Hash related functions
242  *
243  * Every DMA-API request is saved into a struct dma_debug_entry. To
244  * have quick access to these structs they are stored into a hash.
245  */
246 static int hash_fn(struct dma_debug_entry *entry)
247 {
248         /*
249          * Hash function is based on the dma address.
250          * Bits 13-22 of the dma address are used as the index into the hash
251          */
252         return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
253 }
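/*
 * Worked example (illustrative): for dev_addr 0x80402000 this gives
 * (0x80402000 >> 13) & 0x3ff == 0x201, i.e. the entry lands in bucket 0x201.
 */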
254
255 /*
256  * Request exclusive access to a hash bucket for a given dma_debug_entry.
257  */
258 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
259                                            unsigned long *flags)
260 {
261         int idx = hash_fn(entry);
262         unsigned long __flags;
263
264         spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
265         *flags = __flags;
266         return &dma_entry_hash[idx];
267 }
268
269 /*
270  * Give up exclusive access to the hash bucket
271  */
272 static void put_hash_bucket(struct hash_bucket *bucket,
273                             unsigned long *flags)
274 {
275         unsigned long __flags = *flags;
276
277         spin_unlock_irqrestore(&bucket->lock, __flags);
278 }
279
280 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
281 {
282         return ((a->dev_addr == b->dev_addr) &&
283                 (a->dev == b->dev)) ? true : false;
284 }
285
286 static bool containing_match(struct dma_debug_entry *a,
287                              struct dma_debug_entry *b)
288 {
289         if (a->dev != b->dev)
290                 return false;
291
292         if ((b->dev_addr <= a->dev_addr) &&
293             ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
294                 return true;
295
296         return false;
297 }
298
299 /*
300  * Search a given entry in the hash bucket list
301  */
302 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
303                                                   struct dma_debug_entry *ref,
304                                                   match_fn match)
305 {
306         struct dma_debug_entry *entry, *ret = NULL;
307         int matches = 0, match_lvl, last_lvl = -1;
308
309         list_for_each_entry(entry, &bucket->list, list) {
310                 if (!match(ref, entry))
311                         continue;
312
313                 /*
314                  * Some drivers map the same physical address multiple
315                  * times. Without a hardware IOMMU this results in the
316                  * same device addresses being put into the dma-debug
317                  * hash multiple times too. This can result in false
318                  * positives being reported. Therefore we implement a
319                  * best-fit algorithm here which returns the entry from
320                  * the hash which fits best to the reference value
321                  * instead of the first-fit.
322                  */
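                /*
                 * Score the candidate: one point each for matching size,
                 * type, direction and sg_call_ents; four points is a
                 * perfect fit.
                 */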
323                 matches += 1;
324                 match_lvl = 0;
325                 entry->size         == ref->size         ? ++match_lvl : 0;
326                 entry->type         == ref->type         ? ++match_lvl : 0;
327                 entry->direction    == ref->direction    ? ++match_lvl : 0;
328                 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
329
330                 if (match_lvl == 4) {
331                         /* perfect-fit - return the result */
332                         return entry;
333                 } else if (match_lvl > last_lvl) {
334                         /*
335                          * We found an entry that fits better than the
336                          * previous one or it is the 1st match.
337                          */
338                         last_lvl = match_lvl;
339                         ret      = entry;
340                 }
341         }
342
343         /*
344          * If we have multiple matches but no perfect-fit, just return
345          * NULL.
346          */
347         ret = (matches == 1) ? ret : NULL;
348
349         return ret;
350 }
351
352 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
353                                                  struct dma_debug_entry *ref)
354 {
355         return __hash_bucket_find(bucket, ref, exact_match);
356 }
357
358 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
359                                                    struct dma_debug_entry *ref,
360                                                    unsigned long *flags)
361 {
362
363         unsigned int max_range = dma_get_max_seg_size(ref->dev);
364         struct dma_debug_entry *entry, index = *ref;
365         unsigned int range = 0;
366
367         while (range <= max_range) {
368                 entry = __hash_bucket_find(*bucket, &index, containing_match);
369
370                 if (entry)
371                         return entry;
372
373                 /*
374                  * Nothing found, go back a hash bucket
375                  */
376                 put_hash_bucket(*bucket, flags);
377                 range          += (1 << HASH_FN_SHIFT);
378                 index.dev_addr -= (1 << HASH_FN_SHIFT);
379                 *bucket = get_hash_bucket(&index, flags);
380         }
381
382         return NULL;
383 }
384
385 /*
386  * Add an entry to a hash bucket
387  */
388 static void hash_bucket_add(struct hash_bucket *bucket,
389                             struct dma_debug_entry *entry)
390 {
391         list_add_tail(&entry->list, &bucket->list);
392 }
393
394 /*
395  * Remove entry from a hash bucket list
396  */
397 static void hash_bucket_del(struct dma_debug_entry *entry)
398 {
399         list_del(&entry->list);
400 }
401
402 /*
403  * Dump mapping entries for debugging purposes
404  */
405 void debug_dma_dump_mappings(struct device *dev)
406 {
407         int idx;
408
409         for (idx = 0; idx < HASH_SIZE; idx++) {
410                 struct hash_bucket *bucket = &dma_entry_hash[idx];
411                 struct dma_debug_entry *entry;
412                 unsigned long flags;
413
414                 spin_lock_irqsave(&bucket->lock, flags);
415
416                 list_for_each_entry(entry, &bucket->list, list) {
417                         if (!dev || dev == entry->dev) {
418                                 dev_info(entry->dev,
419                                          "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
420                                          type2name[entry->type], idx,
421                                          (unsigned long long)entry->paddr,
422                                          entry->dev_addr, entry->size,
423                                          dir2name[entry->direction],
424                                          maperr2str[entry->map_err_type]);
425                         }
426                 }
427
428                 spin_unlock_irqrestore(&bucket->lock, flags);
429         }
430 }
431 EXPORT_SYMBOL(debug_dma_dump_mappings);
432
433 /*
434  * device info snapshot updating functions
435  */
436 static void ____dev_info_incr(struct dma_dev_info *info,
437                               struct dma_debug_entry *entry)
438 {
439         unsigned long flags;
440
441         spin_lock_irqsave(&info->lock, flags);
442
443         info->current_allocs++;
444         info->total_allocs++;
445         if (info->current_allocs > info->max_allocs)
446                 info->max_allocs = info->current_allocs;
447
448         info->current_alloc_size += entry->size;
449         info->total_alloc_size += entry->size;
450         if (info->current_alloc_size > info->max_alloc_size)
451                 info->max_alloc_size = info->current_alloc_size;
452
453         spin_unlock_irqrestore(&info->lock, flags);
454 }
455
456 static void ____dev_info_decr(struct dma_dev_info *info,
457                               struct dma_debug_entry *entry)
458 {
459         unsigned long flags;
460
461         spin_lock_irqsave(&info->lock, flags);
462
463         info->current_allocs--;
464         info->current_alloc_size -= entry->size;
465
466         spin_unlock_irqrestore(&info->lock, flags);
467 }
468
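/*
 * Look up the per-device statistics for entry->dev, allocating a new
 * dma_dev_info on first use, and apply @fn (the increment or decrement
 * helper above) to it.
 */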
469 static void __dev_info_fn(struct dma_debug_entry *entry,
470          void (*fn)(struct dma_dev_info *, struct dma_debug_entry *))
471 {
472         struct dma_dev_info *info;
473         unsigned long flags;
474
475         spin_lock_irqsave(&dev_info_lock, flags);
476
477         list_for_each_entry(info, &dev_info_list, list)
478                 if (info->dev == entry->dev)
479                         goto found;
480
481         info = kzalloc(sizeof(*info), GFP_ATOMIC); /* can't sleep: dev_info_lock held, irqs off */
482         if (!info) {
483                 dev_err(entry->dev, "Out of memory at %s\n", __func__);
484                 spin_unlock_irqrestore(&dev_info_lock, flags);
485                 return;
486         }
487
488         spin_lock_init(&info->lock);
489         info->dev = entry->dev;
490         list_add(&info->list, &dev_info_list);
491 found:
492         spin_unlock_irqrestore(&dev_info_lock, flags);
493         fn(info, entry);
494 }
495
496 static inline void dev_info_alloc(struct dma_debug_entry *entry)
497 {
498         __dev_info_fn(entry, ____dev_info_incr);
499 }
500
501 static inline void dev_info_free(struct dma_debug_entry *entry)
502 {
503         __dev_info_fn(entry, ____dev_info_decr);
504 }
505
506 /*
507  * Wrapper function for adding an entry to the hash.
508  * This function takes care of locking itself.
509  */
510 static void add_dma_entry(struct dma_debug_entry *entry)
511 {
512         struct hash_bucket *bucket;
513         unsigned long flags;
514
515         bucket = get_hash_bucket(entry, &flags);
516         hash_bucket_add(bucket, entry);
517         put_hash_bucket(bucket, &flags);
518
519         dev_info_alloc(entry);
520 }
521
522 static struct dma_debug_entry *__dma_entry_alloc(void)
523 {
524         struct dma_debug_entry *entry;
525
526         entry = list_entry(free_entries.next, struct dma_debug_entry, list);
527         list_del(&entry->list);
528         memset(entry, 0, sizeof(*entry));
529
530         num_free_entries -= 1;
531         if (num_free_entries < min_free_entries)
532                 min_free_entries = num_free_entries;
533
534         return entry;
535 }
536
537 /* struct dma_entry allocator
538  *
539  * The next two functions implement the allocator for
540  * struct dma_debug_entries.
541  */
542 static struct dma_debug_entry *dma_entry_alloc(void)
543 {
544         struct dma_debug_entry *entry;
545         unsigned long flags;
546
547         spin_lock_irqsave(&free_entries_lock, flags);
548
549         if (list_empty(&free_entries)) {
550                 pr_err("DMA-API: debugging out of memory - disabling\n");
551                 global_disable = true;
552                 spin_unlock_irqrestore(&free_entries_lock, flags);
553                 return NULL;
554         }
555
556         entry = __dma_entry_alloc();
557
558         spin_unlock_irqrestore(&free_entries_lock, flags);
559
560 #ifdef CONFIG_STACKTRACE
561         entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
562         entry->stacktrace.entries = entry->st_entries;
563         entry->stacktrace.skip = 2;
564         save_stack_trace(&entry->stacktrace);
565 #endif
566
567         return entry;
568 }
569
570 static void dma_entry_free(struct dma_debug_entry *entry)
571 {
572         unsigned long flags;
573
574         /*
575          * add to beginning of the list - this way the entries are
576          * more likely cache hot when they are reallocated.
577          */
578         spin_lock_irqsave(&free_entries_lock, flags);
579         list_add(&entry->list, &free_entries);
580         num_free_entries += 1;
581         spin_unlock_irqrestore(&free_entries_lock, flags);
582 }
583
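/*
 * Grow or shrink the pool of preallocated dma_debug_entry structs at runtime.
 * Returns 0 if the pool ends up holding exactly num_entries entries.
 */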
584 int dma_debug_resize_entries(u32 num_entries)
585 {
586         int i, delta, ret = 0;
587         unsigned long flags;
588         struct dma_debug_entry *entry;
589         LIST_HEAD(tmp);
590
591         spin_lock_irqsave(&free_entries_lock, flags);
592
593         if (nr_total_entries < num_entries) {
594                 delta = num_entries - nr_total_entries;
595
596                 spin_unlock_irqrestore(&free_entries_lock, flags);
597
598                 for (i = 0; i < delta; i++) {
599                         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
600                         if (!entry)
601                                 break;
602
603                         list_add_tail(&entry->list, &tmp);
604                 }
605
606                 spin_lock_irqsave(&free_entries_lock, flags);
607
608                 list_splice(&tmp, &free_entries);
609                 nr_total_entries += i;
610                 num_free_entries += i;
611         } else {
612                 delta = nr_total_entries - num_entries;
613
614                 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
615                         entry = __dma_entry_alloc();
616                         kfree(entry);
617                 }
618
619                 nr_total_entries -= i;
620         }
621
622         if (nr_total_entries != num_entries)
623                 ret = 1;
624
625         spin_unlock_irqrestore(&free_entries_lock, flags);
626
627         return ret;
628 }
629 EXPORT_SYMBOL(dma_debug_resize_entries);
630
631 /*
632  * DMA-API debugging init code
633  *
634  * The init code does two things:
635  *   1. Initialize core data structures
636  *   2. Preallocate a given number of dma_debug_entry structs
637  */
638
639 static int prealloc_memory(u32 num_entries)
640 {
641         struct dma_debug_entry *entry, *next_entry;
642         int i;
643
644         for (i = 0; i < num_entries; ++i) {
645                 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
646                 if (!entry)
647                         goto out_err;
648
649                 list_add_tail(&entry->list, &free_entries);
650         }
651
652         num_free_entries = num_entries;
653         min_free_entries = num_entries;
654
655         pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
656
657         return 0;
658
659 out_err:
660
661         list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
662                 list_del(&entry->list);
663                 kfree(entry);
664         }
665
666         return -ENOMEM;
667 }
668
669 static ssize_t filter_read(struct file *file, char __user *user_buf,
670                            size_t count, loff_t *ppos)
671 {
672         char buf[NAME_MAX_LEN + 1];
673         unsigned long flags;
674         int len;
675
676         if (!current_driver_name[0])
677                 return 0;
678
679         /*
680          * We can't copy to userspace directly because current_driver_name can
681          * only be read under the driver_name_lock with irqs disabled. So
682          * create a temporary copy first.
683          */
684         read_lock_irqsave(&driver_name_lock, flags);
685         len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
686         read_unlock_irqrestore(&driver_name_lock, flags);
687
688         return simple_read_from_buffer(user_buf, count, ppos, buf, len);
689 }
690
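/*
 * Write handler for the debugfs "driver_filter" file.  Only the first
 * whitespace-delimited token is used as the driver name to filter on, e.g.
 * (illustrative) echo mydriver > /sys/kernel/debug/dma-api/driver_filter.
 * A string that does not start with an alphanumeric character switches the
 * filter off again.
 */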
691 static ssize_t filter_write(struct file *file, const char __user *userbuf,
692                             size_t count, loff_t *ppos)
693 {
694         char buf[NAME_MAX_LEN];
695         unsigned long flags;
696         size_t len;
697         int i;
698
699         /*
700          * We can't copy from userspace directly. Access to
701          * current_driver_name is protected with a write_lock with irqs
702          * disabled. Since copy_from_user can fault and may sleep we
703          * need to copy to temporary buffer first
704          */
705         len = min(count, (size_t)(NAME_MAX_LEN - 1));
706         if (copy_from_user(buf, userbuf, len))
707                 return -EFAULT;
708
709         buf[len] = 0;
710
711         write_lock_irqsave(&driver_name_lock, flags);
712
713         /*
714          * Now handle the string we got from userspace very carefully.
715          * The rules are:
716          *         - only use the first token we got
717          *         - token delimiter is everything looking like a space
718          *           character (' ', '\n', '\t' ...)
719          *
720          */
721         if (!isalnum(buf[0])) {
722                 /*
723                  * If the first character userspace gave us is not
724                  * alphanumerical then assume the filter should be
725                  * switched off.
726                  */
727                 if (current_driver_name[0])
728                         pr_info("DMA-API: switching off dma-debug driver filter\n");
729                 current_driver_name[0] = 0;
730                 current_driver = NULL;
731                 goto out_unlock;
732         }
733
734         /*
735          * Now parse out the first token and use it as the name for the
736          * driver to filter for.
737          */
738         for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
739                 current_driver_name[i] = buf[i];
740                 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
741                         break;
742         }
743         current_driver_name[i] = 0;
744         current_driver = NULL;
745
746         pr_info("DMA-API: enable driver filter for driver [%s]\n",
747                 current_driver_name);
748
749 out_unlock:
750         write_unlock_irqrestore(&driver_name_lock, flags);
751
752         return count;
753 }
754
755 static inline void seq_print_ip_sym(struct seq_file *s, unsigned long ip)
756 {
757         seq_printf(s, "[<%p>] %pS\n", (void *)ip, (void *)ip);
758 }
759
760 void seq_print_trace(struct seq_file *s, struct stack_trace *trace)
761 {
762         int i;
763
764         if (WARN_ON(!trace->entries))
765                 return;
766
767         for (i = trace->skip; i < trace->nr_entries; i++)
768                 seq_print_ip_sym(s, trace->entries[i]);
769 }
770
771 /*
772  * Print all map entries just in the order they are stored. We assume that the
773  * user will be able to parse this later anyway. Detailed output includes stack
774  * traces of allocations.
775  */
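/*
 * A line of output might look like (values purely illustrative):
 *   "    tegradc.0 coherent idx 42 P=2f600000 D=ffe00000 L=1000 DMA_BIDIRECTIONAL A=..."
 * where the A= field comes from debug_dma_platformdata().
 */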
776 void seq_print_dma_mappings(struct seq_file *s, int detail)
777 {
778         int idx;
779
780         for (idx = 0; idx < HASH_SIZE; idx++) {
781                 struct hash_bucket *bucket = &dma_entry_hash[idx];
782                 struct dma_debug_entry *entry;
783                 unsigned long flags;
784
785                 spin_lock_irqsave(&bucket->lock, flags);
786
787                 list_for_each_entry(entry, &bucket->list, list) {
788                         seq_printf(s,
789                                    "    %s %s idx %d P=%llx D=%llx L=%llx %s A=%s\n",
790                                    dev_name(entry->dev),
791                                    type2name[entry->type], idx,
792                                    (u64)entry->paddr,
793                                    entry->dev_addr, entry->size,
794                                    dir2name[entry->direction],
795                                    debug_dma_platformdata(entry->dev));
796
797                         if (detail)
798                                 seq_print_trace(s, &entry->stacktrace);
799                 }
800
801                 spin_unlock_irqrestore(&bucket->lock, flags);
802         }
803 }
804
805 void __weak dma_debugfs_platform_info(struct dentry *dent)
806 {
807 }
808
809 static int _dump_allocs(struct seq_file *s, void *data)
810 {
811         int detail = (int)(long)s->private;
812
813         seq_print_dma_mappings(s, detail);
814         return 0;
815 }
816
817 static int _dump_dev_info(struct seq_file *s, void *data)
818 {
819         struct dma_dev_info *i;
820         unsigned long flags;
821
822         spin_lock_irqsave(&dev_info_lock, flags);
823
824         list_for_each_entry(i, &dev_info_list, list)
825                 seq_printf(s,
826                            "dev=%s curallocs=%d totallocs=%d maxallocs=%d cursize=%d totsize=%d maxsize=%d\n",
827                            dev_name(i->dev), i->current_allocs, i->total_allocs,
828                            i->max_allocs, i->current_alloc_size,
829                            i->total_alloc_size, i->max_alloc_size);
830
831         spin_unlock_irqrestore(&dev_info_lock, flags);
832         return 0;
833 }
834
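/*
 * Generate a single_open()-based <name>_open()/<name>_fops pair around a
 * seq_file show function, passing __data through as the seq_file private data.
 */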
835 #define DEFINE_DEBUGFS(__name, __func, __data)                          \
836 static int __name ## _open(struct inode *inode, struct file *file)      \
837 {                                                                       \
838         return single_open(file, __func, __data);                       \
839 }                                                                       \
840 static const struct file_operations __name ## _fops = {                 \
841         .open           = __name ## _open,                              \
842         .read           = seq_read,                                     \
843         .llseek         = seq_lseek,                                    \
844         .release        = single_release,                               \
845 }
846
847 DEFINE_DEBUGFS(_dump_allocs, _dump_allocs, NULL);
848 DEFINE_DEBUGFS(_dump_allocs_detail, _dump_allocs, (void *)1);
849 DEFINE_DEBUGFS(_dump_dev_info, _dump_dev_info, NULL);
850 #undef DEFINE_DEBUGFS
851
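/*
 * Create the dump_allocs, dump_allocs_detail and dump_dev_info files under
 * the dma-api debugfs directory, then let the platform add its own entries
 * via dma_debugfs_platform_info().
 */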
852 static int map_dump_debug_fs_init(void)
853 {
854 #define CREATE_FILE(name) \
855         debugfs_create_file(#name, S_IRUGO, \
856                                 dma_debug_dent, NULL, \
857                                 &_##name##_fops)
858
859         if (!CREATE_FILE(dump_allocs))
860                 return -ENOMEM;
861
862         if (!CREATE_FILE(dump_allocs_detail))
863                 return -ENOMEM;
864
865         if (!CREATE_FILE(dump_dev_info))
866                 return -ENOMEM;
867
868 #undef CREATE_FILE
869
870         dma_debugfs_platform_info(dma_debug_dent);
871         return 0;
872 }
873
874 static const struct file_operations filter_fops = {
875         .read  = filter_read,
876         .write = filter_write,
877         .llseek = default_llseek,
878 };
879
880 static int dma_debug_fs_init(void)
881 {
882         dma_debug_dent = debugfs_create_dir("dma-api", NULL);
883         if (!dma_debug_dent) {
884                 pr_err("DMA-API: can not create debugfs directory\n");
885                 return -ENOMEM;
886         }
887
888         global_disable_dent = debugfs_create_bool("disabled", 0444,
889                         dma_debug_dent,
890                         &global_disable);
891         if (!global_disable_dent)
892                 goto out_err;
893
894         error_count_dent = debugfs_create_u32("error_count", 0444,
895                         dma_debug_dent, &error_count);
896         if (!error_count_dent)
897                 goto out_err;
898
899         show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
900                         dma_debug_dent,
901                         &show_all_errors);
902         if (!show_all_errors_dent)
903                 goto out_err;
904
905         show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
906                         dma_debug_dent,
907                         &show_num_errors);
908         if (!show_num_errors_dent)
909                 goto out_err;
910
911         num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
912                         dma_debug_dent,
913                         &num_free_entries);
914         if (!num_free_entries_dent)
915                 goto out_err;
916
917         min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
918                         dma_debug_dent,
919                         &min_free_entries);
920         if (!min_free_entries_dent)
921                 goto out_err;
922
923         filter_dent = debugfs_create_file("driver_filter", 0644,
924                                           dma_debug_dent, NULL, &filter_fops);
925         if (!filter_dent)
926                 goto out_err;
927
928         if (map_dump_debug_fs_init())
929                 goto out_err;
930
931         return 0;
932
933 out_err:
934         debugfs_remove_recursive(dma_debug_dent);
935
936         return -ENOMEM;
937 }
938
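/*
 * Count the mappings still outstanding for @dev and hand one of them back
 * through @out_entry so the caller can print its details.
 */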
939 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
940 {
941         struct dma_debug_entry *entry;
942         unsigned long flags;
943         int count = 0, i;
944
945         local_irq_save(flags);
946
947         for (i = 0; i < HASH_SIZE; ++i) {
948                 spin_lock(&dma_entry_hash[i].lock);
949                 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
950                         if (entry->dev == dev) {
951                                 count += 1;
952                                 *out_entry = entry;
953                         }
954                 }
955                 spin_unlock(&dma_entry_hash[i].lock);
956         }
957
958         local_irq_restore(flags);
959
960         return count;
961 }
962
963 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
964 {
965         struct device *dev = data;
966         struct dma_debug_entry *uninitialized_var(entry);
967         int count;
968
969         if (!initialized || global_disable)
970                 return 0;
971
972         switch (action) {
973         case BUS_NOTIFY_UNBOUND_DRIVER:
974                 count = device_dma_allocations(dev, &entry);
975                 if (count == 0)
976                         break;
977                 err_printk(dev, entry, "DMA-API: device driver has pending "
978                                 "DMA allocations while released from device "
979                                 "[count=%d]\n"
980                                 "Details of one leaked entry: "
981                                 "[device address=0x%016llx] [size=%llu bytes] "
982                                 "[mapped with %s] [mapped as %s]\n",
983                         count, entry->dev_addr, entry->size,
984                         dir2name[entry->direction], type2name[entry->type]);
985                 break;
986         default:
987                 break;
988         }
989
990         return 0;
991 }
992
993 void dma_debug_add_bus(struct bus_type *bus)
994 {
995         struct notifier_block *nb;
996
997         if (!initialized || global_disable)
998                 return;
999
1000         nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1001         if (nb == NULL) {
1002                 pr_err("dma_debug_add_bus: out of memory\n");
1003                 return;
1004         }
1005
1006         nb->notifier_call = dma_debug_device_change;
1007
1008         bus_register_notifier(bus, nb);
1009 }
1010
1011 /*
1012  * Let the architectures decide how many entries should be preallocated.
1013  */
1014 void dma_debug_init(u32 num_entries)
1015 {
1016         int i;
1017
1018         if (global_disable)
1019                 return;
1020
1021         for (i = 0; i < HASH_SIZE; ++i) {
1022                 INIT_LIST_HEAD(&dma_entry_hash[i].list);
1023                 spin_lock_init(&dma_entry_hash[i].lock);
1024         }
1025
1026         if (dma_debug_fs_init() != 0) {
1027                 pr_err("DMA-API: error creating debugfs entries - disabling\n");
1028                 global_disable = true;
1029
1030                 return;
1031         }
1032
1033         if (req_entries)
1034                 num_entries = req_entries;
1035
1036         if (prealloc_memory(num_entries) != 0) {
1037                 pr_err("DMA-API: debugging out of memory error - disabled\n");
1038                 global_disable = true;
1039
1040                 return;
1041         }
1042
1043         nr_total_entries = num_free_entries;
1044
1045         pr_info("DMA-API: debugging enabled by kernel config\n");
1046         initialized = true;
1047 }
1048
1049 static __init int dma_debug_cmdline(char *str)
1050 {
1051         if (!str)
1052                 return -EINVAL;
1053
1054         if (strncmp(str, "off", 3) == 0) {
1055                 pr_info("DMA-API: debugging disabled on kernel command line\n");
1056                 global_disable = true;
1057         }
1058
1059         return 0;
1060 }
1061
1062 static __init int dma_debug_entries_cmdline(char *str)
1063 {
1064         int res;
1065
1066         if (!str)
1067                 return -EINVAL;
1068
1069         res = get_option(&str, &req_entries);
1070
1071         if (!res)
1072                 req_entries = 0;
1073
1074         return 0;
1075 }
1076
1077 __setup("dma_debug=", dma_debug_cmdline);
1078 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
1079
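/*
 * Validate an unmap/free request against the stored mapping and, if an entry
 * is found, drop it from the hash, update the per-device statistics and
 * return it to the free list.
 */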
1080 static void check_unmap(struct dma_debug_entry *ref)
1081 {
1082         struct dma_debug_entry *entry;
1083         struct hash_bucket *bucket;
1084         unsigned long flags;
1085
1086         bucket = get_hash_bucket(ref, &flags);
1087         entry = bucket_find_exact(bucket, ref);
1088
1089         if (!entry) {
1090                 /* must drop lock before calling dma_mapping_error */
1091                 put_hash_bucket(bucket, &flags);
1092
1093                 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
1094                         err_printk(ref->dev, NULL,
1095                                    "DMA-API: device driver tries to free an "
1096                                    "invalid DMA memory address\n");
1097                 } else {
1098                         err_printk(ref->dev, NULL,
1099                                    "DMA-API: device driver tries to free DMA "
1100                                    "memory it has not allocated [device "
1101                                    "address=0x%016llx] [size=%llu bytes]\n",
1102                                    ref->dev_addr, ref->size);
1103                 }
1104                 return;
1105         }
1106
1107         if (ref->size != entry->size) {
1108                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1109                            "DMA memory with different size "
1110                            "[device address=0x%016llx] [map size=%llu bytes] "
1111                            "[unmap size=%llu bytes]\n",
1112                            ref->dev_addr, entry->size, ref->size);
1113         }
1114
1115         if (ref->type != entry->type) {
1116                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1117                            "DMA memory with wrong function "
1118                            "[device address=0x%016llx] [size=%llu bytes] "
1119                            "[mapped as %s] [unmapped as %s]\n",
1120                            ref->dev_addr, ref->size,
1121                            type2name[entry->type], type2name[ref->type]);
1122         } else if ((entry->type == dma_debug_coherent) &&
1123                    (ref->paddr != entry->paddr)) {
1124                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1125                            "DMA memory with different CPU address "
1126                            "[device address=0x%016llx] [size=%llu bytes] "
1127                            "[cpu alloc address=0x%016llx] "
1128                            "[cpu free address=0x%016llx]",
1129                            ref->dev_addr, ref->size,
1130                            (unsigned long long)entry->paddr,
1131                            (unsigned long long)ref->paddr);
1132         }
1133
1134         if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1135             ref->sg_call_ents != entry->sg_call_ents) {
1136                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1137                            "DMA sg list with different entry count "
1138                            "[map count=%d] [unmap count=%d]\n",
1139                            entry->sg_call_ents, ref->sg_call_ents);
1140         }
1141
1142         /*
1143          * This may be no bug in reality - but most implementations of the
1144          * DMA API don't handle this properly, so check for it here
1145          */
1146         if (ref->direction != entry->direction) {
1147                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1148                            "DMA memory with different direction "
1149                            "[device address=0x%016llx] [size=%llu bytes] "
1150                            "[mapped with %s] [unmapped with %s]\n",
1151                            ref->dev_addr, ref->size,
1152                            dir2name[entry->direction],
1153                            dir2name[ref->direction]);
1154         }
1155
1156         if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1157                 err_printk(ref->dev, entry,
1158                            "DMA-API: device driver failed to check map error "
1159                            "[device address=0x%016llx] [size=%llu bytes] "
1160                            "[mapped as %s]",
1161                            ref->dev_addr, ref->size,
1162                            type2name[entry->type]);
1163         }
1164
1165         dev_info_free(entry);
1166
1167         hash_bucket_del(entry);
1168         dma_entry_free(entry);
1169
1170         put_hash_bucket(bucket, &flags);
1171 }
1172
1173 static void check_for_stack(struct device *dev, void *addr)
1174 {
1175         if (object_is_on_stack(addr))
1176                 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
1177                                 "stack [addr=%p]\n", addr);
1178 }
1179
1180 static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1181 {
1182         unsigned long a1 = (unsigned long)addr;
1183         unsigned long b1 = a1 + len;
1184         unsigned long a2 = (unsigned long)start;
1185         unsigned long b2 = (unsigned long)end;
1186
1187         return !(b1 <= a2 || a1 >= b2);
1188 }
1189
1190 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1191 {
1192         if (overlap(addr, len, _text, _etext) ||
1193             overlap(addr, len, __start_rodata, __end_rodata))
1194                 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1195 }
1196
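/*
 * Validate a dma_sync_*() call: the synced region must lie within an
 * existing mapping and the sync direction must be compatible with the
 * direction the memory was originally mapped with.
 */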
1197 static void check_sync(struct device *dev,
1198                        struct dma_debug_entry *ref,
1199                        bool to_cpu)
1200 {
1201         struct dma_debug_entry *entry;
1202         struct hash_bucket *bucket;
1203         unsigned long flags;
1204
1205         bucket = get_hash_bucket(ref, &flags);
1206
1207         entry = bucket_find_contain(&bucket, ref, &flags);
1208
1209         if (!entry) {
1210                 err_printk(dev, NULL, "DMA-API: device driver tries "
1211                                 "to sync DMA memory it has not allocated "
1212                                 "[device address=0x%016llx] [size=%llu bytes]\n",
1213                                 (unsigned long long)ref->dev_addr, ref->size);
1214                 goto out;
1215         }
1216
1217         if (ref->size > entry->size) {
1218                 err_printk(dev, entry, "DMA-API: device driver syncs"
1219                                 " DMA memory outside allocated range "
1220                                 "[device address=0x%016llx] "
1221                                 "[allocation size=%llu bytes] "
1222                                 "[sync offset+size=%llu]\n",
1223                                 entry->dev_addr, entry->size,
1224                                 ref->size);
1225         }
1226
1227         if (entry->direction == DMA_BIDIRECTIONAL)
1228                 goto out;
1229
1230         if (ref->direction != entry->direction) {
1231                 err_printk(dev, entry, "DMA-API: device driver syncs "
1232                                 "DMA memory with different direction "
1233                                 "[device address=0x%016llx] [size=%llu bytes] "
1234                                 "[mapped with %s] [synced with %s]\n",
1235                                 (unsigned long long)ref->dev_addr, entry->size,
1236                                 dir2name[entry->direction],
1237                                 dir2name[ref->direction]);
1238         }
1239
1240         if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1241                       !(ref->direction == DMA_TO_DEVICE))
1242                 err_printk(dev, entry, "DMA-API: device driver syncs "
1243                                 "device read-only DMA memory for cpu "
1244                                 "[device address=0x%016llx] [size=%llu bytes] "
1245                                 "[mapped with %s] [synced with %s]\n",
1246                                 (unsigned long long)ref->dev_addr, entry->size,
1247                                 dir2name[entry->direction],
1248                                 dir2name[ref->direction]);
1249
1250         if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1251                        !(ref->direction == DMA_FROM_DEVICE))
1252                 err_printk(dev, entry, "DMA-API: device driver syncs "
1253                                 "device write-only DMA memory to device "
1254                                 "[device address=0x%016llx] [size=%llu bytes] "
1255                                 "[mapped with %s] [synced with %s]\n",
1256                                 (unsigned long long)ref->dev_addr, entry->size,
1257                                 dir2name[entry->direction],
1258                                 dir2name[ref->direction]);
1259
1260 out:
1261         put_hash_bucket(bucket, &flags);
1262 }
1263
1264 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1265                         size_t size, int direction, dma_addr_t dma_addr,
1266                         bool map_single)
1267 {
1268         struct dma_debug_entry *entry;
1269
1270         if (unlikely(!initialized || global_disable))
1271                 return;
1272
1273         if (dma_mapping_error(dev, dma_addr))
1274                 return;
1275
1276         entry = dma_entry_alloc();
1277         if (!entry)
1278                 return;
1279
1280         entry->dev       = dev;
1281         entry->type      = dma_debug_page;
1282         entry->paddr     = page_to_phys(page) + offset;
1283         entry->dev_addr  = dma_addr;
1284         entry->size      = size;
1285         entry->direction = direction;
1286         entry->map_err_type = MAP_ERR_NOT_CHECKED;
1287
1288         if (map_single)
1289                 entry->type = dma_debug_single;
1290
1291         if (!PageHighMem(page)) {
1292                 void *addr = page_address(page) + offset;
1293
1294                 check_for_stack(dev, addr);
1295                 check_for_illegal_area(dev, addr, size);
1296         }
1297
1298         add_dma_entry(entry);
1299 }
1300 EXPORT_SYMBOL(debug_dma_map_page);
1301
1302 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1303 {
1304         struct dma_debug_entry ref;
1305         struct dma_debug_entry *entry;
1306         struct hash_bucket *bucket;
1307         unsigned long flags;
1308
1309         if (unlikely(!initialized || global_disable))
1310                 return;
1311
1312         ref.dev = dev;
1313         ref.dev_addr = dma_addr;
1314         bucket = get_hash_bucket(&ref, &flags);
1315
1316         list_for_each_entry(entry, &bucket->list, list) {
1317                 if (!exact_match(&ref, entry))
1318                         continue;
1319
1320                 /*
1321                  * The same physical address can be mapped multiple
1322                  * times. Without a hardware IOMMU this results in the
1323                  * same device addresses being put into the dma-debug
1324                  * hash multiple times too. This can result in false
1325                  * positives being reported. Therefore we implement a
1326                  * best-fit algorithm here which updates the first entry
1327                  * from the hash which fits the reference value and is
1328                  * not currently listed as being checked.
1329                  */
1330                 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1331                         entry->map_err_type = MAP_ERR_CHECKED;
1332                         break;
1333                 }
1334         }
1335
1336         put_hash_bucket(bucket, &flags);
1337 }
1338 EXPORT_SYMBOL(debug_dma_mapping_error);
1339
1340 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1341                           size_t size, int direction, bool map_single)
1342 {
1343         struct dma_debug_entry ref = {
1344                 .type           = dma_debug_page,
1345                 .dev            = dev,
1346                 .dev_addr       = addr,
1347                 .size           = size,
1348                 .direction      = direction,
1349         };
1350
1351         if (unlikely(!initialized || global_disable))
1352                 return;
1353
1354         if (map_single)
1355                 ref.type = dma_debug_single;
1356
1357         check_unmap(&ref);
1358 }
1359 EXPORT_SYMBOL(debug_dma_unmap_page);
1360
1361 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1362                       int nents, int mapped_ents, int direction)
1363 {
1364         struct dma_debug_entry *entry;
1365         struct scatterlist *s;
1366         int i;
1367
1368         if (unlikely(!initialized || global_disable))
1369                 return;
1370
1371         for_each_sg(sg, s, mapped_ents, i) {
1372                 entry = dma_entry_alloc();
1373                 if (!entry)
1374                         return;
1375
1376                 entry->type           = dma_debug_sg;
1377                 entry->dev            = dev;
1378                 entry->paddr          = sg_phys(s);
1379                 entry->size           = sg_dma_len(s);
1380                 entry->dev_addr       = sg_dma_address(s);
1381                 entry->direction      = direction;
1382                 entry->sg_call_ents   = nents;
1383                 entry->sg_mapped_ents = mapped_ents;
1384
1385                 if (!PageHighMem(sg_page(s))) {
1386                         check_for_stack(dev, sg_virt(s));
1387                         check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1388                 }
1389
1390                 add_dma_entry(entry);
1391         }
1392 }
1393 EXPORT_SYMBOL(debug_dma_map_sg);
1394
1395 static int get_nr_mapped_entries(struct device *dev,
1396                                  struct dma_debug_entry *ref)
1397 {
1398         struct dma_debug_entry *entry;
1399         struct hash_bucket *bucket;
1400         unsigned long flags;
1401         int mapped_ents;
1402
1403         bucket       = get_hash_bucket(ref, &flags);
1404         entry        = bucket_find_exact(bucket, ref);
1405         mapped_ents  = 0;
1406
1407         if (entry)
1408                 mapped_ents = entry->sg_mapped_ents;
1409         put_hash_bucket(bucket, &flags);
1410
1411         return mapped_ents;
1412 }
1413
1414 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1415                         int nelems, int dir)
1416 {
1417         struct scatterlist *s;
1418         int mapped_ents = 0, i;
1419
1420         if (unlikely(!initialized || global_disable))
1421                 return;
1422
1423         for_each_sg(sglist, s, nelems, i) {
1424
1425                 struct dma_debug_entry ref = {
1426                         .type           = dma_debug_sg,
1427                         .dev            = dev,
1428                         .paddr          = sg_phys(s),
1429                         .dev_addr       = sg_dma_address(s),
1430                         .size           = sg_dma_len(s),
1431                         .direction      = dir,
1432                         .sg_call_ents   = nelems,
1433                 };
1434
1435                 if (mapped_ents && i >= mapped_ents)
1436                         break;
1437
1438                 if (!i)
1439                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1440
1441                 check_unmap(&ref);
1442         }
1443 }
1444 EXPORT_SYMBOL(debug_dma_unmap_sg);
1445
1446 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1447                               dma_addr_t dma_addr, void *virt)
1448 {
1449         struct dma_debug_entry *entry;
1450
1451         if (unlikely(!initialized || global_disable))
1452                 return;
1453
1454         if (unlikely(virt == NULL))
1455                 return;
1456
1457         entry = dma_entry_alloc();
1458         if (!entry)
1459                 return;
1460
1461         entry->type      = dma_debug_coherent;
1462         entry->dev       = dev;
1463         entry->paddr     = virt_to_phys(virt);
1464         entry->size      = size;
1465         entry->dev_addr  = dma_addr;
1466         entry->direction = DMA_BIDIRECTIONAL;
1467
1468         add_dma_entry(entry);
1469 }
1470 EXPORT_SYMBOL(debug_dma_alloc_coherent);
1471
1472 void debug_dma_free_coherent(struct device *dev, size_t size,
1473                          void *virt, dma_addr_t addr)
1474 {
1475         struct dma_debug_entry ref = {
1476                 .type           = dma_debug_coherent,
1477                 .dev            = dev,
1478                 .paddr          = virt_to_phys(virt),
1479                 .dev_addr       = addr,
1480                 .size           = size,
1481                 .direction      = DMA_BIDIRECTIONAL,
1482         };
1483
1484         if (unlikely(!initialized || global_disable))
1485                 return;
1486
1487         check_unmap(&ref);
1488 }
1489 EXPORT_SYMBOL(debug_dma_free_coherent);
1490
1491 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1492                                    size_t size, int direction)
1493 {
1494         struct dma_debug_entry ref;
1495
1496         if (unlikely(!initialized || global_disable))
1497                 return;
1498
1499         ref.type         = dma_debug_single;
1500         ref.dev          = dev;
1501         ref.dev_addr     = dma_handle;
1502         ref.size         = size;
1503         ref.direction    = direction;
1504         ref.sg_call_ents = 0;
1505
1506         check_sync(dev, &ref, true);
1507 }
1508 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1509
1510 void debug_dma_sync_single_for_device(struct device *dev,
1511                                       dma_addr_t dma_handle, size_t size,
1512                                       int direction)
1513 {
1514         struct dma_debug_entry ref;
1515
1516         if (unlikely(!initialized || global_disable))
1517                 return;
1518
1519         ref.type         = dma_debug_single;
1520         ref.dev          = dev;
1521         ref.dev_addr     = dma_handle;
1522         ref.size         = size;
1523         ref.direction    = direction;
1524         ref.sg_call_ents = 0;
1525
1526         check_sync(dev, &ref, false);
1527 }
1528 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1529
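/*
 * For the range variants the reference size is offset + size, i.e. the span
 * from the start of the mapping to the end of the synced region, so
 * check_sync() can verify the sync stays inside the original allocation.
 */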
1530 void debug_dma_sync_single_range_for_cpu(struct device *dev,
1531                                          dma_addr_t dma_handle,
1532                                          unsigned long offset, size_t size,
1533                                          int direction)
1534 {
1535         struct dma_debug_entry ref;
1536
1537         if (unlikely(!initialized || global_disable))
1538                 return;
1539
1540         ref.type         = dma_debug_single;
1541         ref.dev          = dev;
1542         ref.dev_addr     = dma_handle;
1543         ref.size         = offset + size;
1544         ref.direction    = direction;
1545         ref.sg_call_ents = 0;
1546
1547         check_sync(dev, &ref, true);
1548 }
1549 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1550
1551 void debug_dma_sync_single_range_for_device(struct device *dev,
1552                                             dma_addr_t dma_handle,
1553                                             unsigned long offset,
1554                                             size_t size, int direction)
1555 {
1556         struct dma_debug_entry ref;
1557
1558         if (unlikely(!initialized || global_disable))
1559                 return;
1560
1561         ref.type         = dma_debug_single;
1562         ref.dev          = dev;
1563         ref.dev_addr     = dma_handle;
1564         ref.size         = offset + size;
1565         ref.direction    = direction;
1566         ref.sg_call_ents = 0;
1567
1568         check_sync(dev, &ref, false);
1569 }
1570 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1571
1572 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1573                                int nelems, int direction)
1574 {
1575         struct scatterlist *s;
1576         int mapped_ents = 0, i;
1577
1578         if (unlikely(!initialized || global_disable))
1579                 return;
1580
1581         for_each_sg(sg, s, nelems, i) {
1582
1583                 struct dma_debug_entry ref = {
1584                         .type           = dma_debug_sg,
1585                         .dev            = dev,
1586                         .paddr          = sg_phys(s),
1587                         .dev_addr       = sg_dma_address(s),
1588                         .size           = sg_dma_len(s),
1589                         .direction      = direction,
1590                         .sg_call_ents   = nelems,
1591                 };
1592
1593                 if (!i)
1594                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1595
1596                 if (i >= mapped_ents)
1597                         break;
1598
1599                 check_sync(dev, &ref, true);
1600         }
1601 }
1602 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1603
1604 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1605                                   int nelems, int direction)
1606 {
1607         struct scatterlist *s;
1608         int mapped_ents = 0, i;
1609
1610         if (unlikely(!initialized || global_disable))
1611                 return;
1612
1613         for_each_sg(sg, s, nelems, i) {
1614
1615                 struct dma_debug_entry ref = {
1616                         .type           = dma_debug_sg,
1617                         .dev            = dev,
1618                         .paddr          = sg_phys(s),
1619                         .dev_addr       = sg_dma_address(s),
1620                         .size           = sg_dma_len(s),
1621                         .direction      = direction,
1622                         .sg_call_ents   = nelems,
1623                 };
1624                 if (!i)
1625                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1626
1627                 if (i >= mapped_ents)
1628                         break;
1629
1630                 check_sync(dev, &ref, false);
1631         }
1632 }
1633 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1634
1635 static int __init dma_debug_driver_setup(char *str)
1636 {
1637         int i;
1638
1639         for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1640                 current_driver_name[i] = *str;
1641                 if (*str == 0)
1642                         break;
1643         }
1644
1645         if (current_driver_name[0])
1646                 pr_info("DMA-API: enable driver filter for driver [%s]\n",
1647                         current_driver_name);
1648
1649
1650         return 1;
1651 }
1652 __setup("dma_debug_driver=", dma_debug_driver_setup);