video: tegra: nvmap: Stashing depends on IOMMU API
[linux-3.10.git] drivers/video/tegra/nvmap/nvmap_dmabuf.c
/*
 * dma_buf exporter for nvmap
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)     "nvmap: %s() " fmt, __func__

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/nvmap.h>
#include <linux/dma-buf.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/stringify.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

#ifdef CONFIG_IOMMU_API
#define nvmap_masid_mapping(attach)   to_dma_iommu_mapping((attach)->dev)
#else
#define nvmap_masid_mapping(attach)   NULL
#endif

struct nvmap_handle_info {
        struct nvmap_handle *handle;
        struct list_head maps;
        struct mutex maps_lock;
};

/**
 * List node for maps of nvmap handles via the dma_buf API. These store the
 * necessary info for stashing mappings.
 *
 * @mapping Mapping for which this SGT is valid - for supporting multi-asid.
 * @dir DMA direction.
 * @sgt The scatter gather table to stash.
 * @dev Device for which @sgt was mapped (used when unmapping).
 * @refs Reference counting.
 * @maps_entry Entry on a given attachment's list of maps.
 * @stash_entry Entry on the stash list.
 * @owner The owner of this struct. There can be only one.
 */
struct nvmap_handle_sgt {
        struct dma_iommu_mapping *mapping;
        enum dma_data_direction dir;
        struct sg_table *sgt;
        struct device *dev;

        atomic_t refs;

        struct list_head maps_entry;
        struct list_head stash_entry; /* lock the stash before accessing. */

        struct nvmap_handle_info *owner;
} ____cacheline_aligned_in_smp;
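
/*
 * Lifecycle summary (derived from the code below): an nvmap_handle_sgt is
 * created with a refcount of 1 the first time an attachment maps the handle
 * for a given IOMMU mapping (__nvmap_dmabuf_prep_sgt_locked). Later maps for
 * the same mapping find it on info->maps and bump the refcount instead of
 * remapping. When the count drops back to zero the entry is either stashed
 * (CONFIG_NVMAP_DMABUF_STASH) so its IOVA survives for a future hit, or torn
 * down immediately.
 */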

static DEFINE_MUTEX(nvmap_stashed_maps_lock);
static LIST_HEAD(nvmap_stashed_maps);
static struct kmem_cache *handle_sgt_cache;

/*
 * Initialize a kmem cache for allocating nvmap_handle_sgt's.
 */
int nvmap_dmabuf_stash_init(void)
{
        handle_sgt_cache = KMEM_CACHE(nvmap_handle_sgt, 0);
        if (IS_ERR_OR_NULL(handle_sgt_cache)) {
                pr_err("Failed to make kmem cache for nvmap_handle_sgt.\n");
                return -ENOMEM;
        }

        return 0;
}

#ifdef CONFIG_NVMAP_DMABUF_STASH_STATS
struct nvmap_stash_stats {
        unsigned long long hits;
        unsigned long long all_hits;
        unsigned long long misses;
        unsigned long long evictions;

        unsigned long long stashed_iova;
        unsigned long long stashed_maps;
};

static DEFINE_SPINLOCK(nvmap_stat_lock);
static struct nvmap_stash_stats nvmap_stash_stats;

#define stash_stat_inc(var)                     \
        do {                                    \
                spin_lock(&nvmap_stat_lock);    \
                nvmap_stash_stats.var += 1;     \
                spin_unlock(&nvmap_stat_lock);  \
        } while (0)
#define stash_stat_dec(var)                     \
        do {                                    \
                spin_lock(&nvmap_stat_lock);    \
                nvmap_stash_stats.var -= 1;     \
                spin_unlock(&nvmap_stat_lock);  \
        } while (0)
#define stash_stat_add_iova(handle)                                     \
        do {                                                            \
                spin_lock(&nvmap_stat_lock);                            \
                nvmap_stash_stats.stashed_iova += (handle)->size;       \
                spin_unlock(&nvmap_stat_lock);                          \
        } while (0)
#define stash_stat_sub_iova(handle)                                     \
        do {                                                            \
                spin_lock(&nvmap_stat_lock);                            \
                nvmap_stash_stats.stashed_iova -= (handle)->size;       \
                spin_unlock(&nvmap_stat_lock);                          \
        } while (0)
#else
#define stash_stat_inc(var)
#define stash_stat_dec(var)
#define stash_stat_add_iova(handle)
#define stash_stat_sub_iova(handle)
#endif
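
/*
 * When CONFIG_NVMAP_DMABUF_STASH_STATS is enabled, these counters are
 * exported read-only through debugfs (see nvmap_dmabuf_debugfs_init() at the
 * bottom of this file), together with a write-only clear_stats trigger.
 * Without it, the stash_stat_*() macros compile away to nothing.
 */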

static int nvmap_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
                               struct dma_buf_attachment *attach)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        dev_dbg(dev, "%s() 0x%p\n", __func__, info->handle);
        return 0;
}

static void nvmap_dmabuf_detach(struct dma_buf *dmabuf,
                                struct dma_buf_attachment *attach)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        dev_dbg(attach->dev, "%s() 0x%p\n", __func__, info->handle);
}
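
/*
 * Illustrative importer-side sequence (not part of this file): a driver that
 * receives an nvmap-exported dma_buf would typically do
 *
 *      attach = dma_buf_attach(dmabuf, dev);
 *      sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *      ...program the device with sg_dma_address(sgt->sgl)...
 *      dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *      dma_buf_detach(dmabuf, attach);
 *
 * which lands in the attach/map/unmap/detach callbacks in this file.
 */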

/*
 * Add this sgt to the stash - should be called when the SGT's ref count hits
 * 0.
 */
static void __nvmap_dmabuf_add_stash(struct nvmap_handle_sgt *nvmap_sgt)
{
        pr_debug("Adding mapping to stash.\n");
        mutex_lock(&nvmap_stashed_maps_lock);
        list_add(&nvmap_sgt->stash_entry, &nvmap_stashed_maps);
        mutex_unlock(&nvmap_stashed_maps_lock);
        stash_stat_inc(stashed_maps);
        stash_stat_add_iova(nvmap_sgt->owner->handle);
}

/*
 * Make sure this mapping is no longer stashed - this corresponds to a "hit". If
 * the mapping is not stashed this is just a no-op.
 */
static void __nvmap_dmabuf_del_stash(struct nvmap_handle_sgt *nvmap_sgt)
{
        mutex_lock(&nvmap_stashed_maps_lock);
        if (list_empty(&nvmap_sgt->stash_entry)) {
                mutex_unlock(&nvmap_stashed_maps_lock);
                return;
        }

        pr_debug("Removing map from stash.\n");
        list_del_init(&nvmap_sgt->stash_entry);
        mutex_unlock(&nvmap_stashed_maps_lock);
        stash_stat_inc(hits);
        stash_stat_dec(stashed_maps);
        stash_stat_sub_iova(nvmap_sgt->owner->handle);
}

/*
 * Free an sgt completely. This will bypass the ref count. This also requires
 * that the nvmap_sgt's owner's maps_lock is already held.
 */
static void __nvmap_dmabuf_free_sgt_locked(struct nvmap_handle_sgt *nvmap_sgt)
{
        struct nvmap_handle_info *info = nvmap_sgt->owner;
        DEFINE_DMA_ATTRS(attrs);

        list_del(&nvmap_sgt->maps_entry);

        if (info->handle->heap_pgalloc) {
                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                dma_unmap_sg_attrs(nvmap_sgt->dev,
                                   nvmap_sgt->sgt->sgl, nvmap_sgt->sgt->nents,
                                   nvmap_sgt->dir, &attrs);
        }
        __nvmap_free_sg_table(NULL, info->handle, nvmap_sgt->sgt);

        WARN(atomic_read(&nvmap_sgt->refs), "nvmap: Freeing reffed SGT!");
        kmem_cache_free(handle_sgt_cache, nvmap_sgt);
}

/*
 * Evict an entry from the IOVA stash. This does not do anything to the actual
 * mapping itself - this merely takes the passed nvmap_sgt out of the stash
 * and decrements the relevant cache stats. If the entry is not stashed this
 * is a no-op and the stats are left untouched.
 */
void __nvmap_dmabuf_evict_stash_locked(struct nvmap_handle_sgt *nvmap_sgt)
{
        if (!list_empty(&nvmap_sgt->stash_entry)) {
                list_del_init(&nvmap_sgt->stash_entry);
                stash_stat_dec(stashed_maps);
                stash_stat_sub_iova(nvmap_sgt->owner->handle);
        }
}

/*
 * Locks the stash before doing the eviction.
 */
void __nvmap_dmabuf_evict_stash(struct nvmap_handle_sgt *nvmap_sgt)
{
        mutex_lock(&nvmap_stashed_maps_lock);
        __nvmap_dmabuf_evict_stash_locked(nvmap_sgt);
        mutex_unlock(&nvmap_stashed_maps_lock);
}

/*
 * Prepare an SGT for potential stashing later on.
 */
static int __nvmap_dmabuf_prep_sgt_locked(struct dma_buf_attachment *attach,
                                   enum dma_data_direction dir,
                                   struct sg_table *sgt)
{
        struct nvmap_handle_sgt *nvmap_sgt;
        struct nvmap_handle_info *info = attach->dmabuf->priv;

        pr_debug("Prepping SGT.\n");
        nvmap_sgt = kmem_cache_alloc(handle_sgt_cache, GFP_KERNEL);
        if (IS_ERR_OR_NULL(nvmap_sgt)) {
                pr_err("Prepping SGT failed.\n");
                return -ENOMEM;
        }

        nvmap_sgt->mapping = nvmap_masid_mapping(attach);
        nvmap_sgt->dir = dir;
        nvmap_sgt->sgt = sgt;
        nvmap_sgt->dev = attach->dev;
        nvmap_sgt->owner = info;
        INIT_LIST_HEAD(&nvmap_sgt->stash_entry);
        atomic_set(&nvmap_sgt->refs, 1);
        list_add(&nvmap_sgt->maps_entry, &info->maps);
        return 0;
}

/*
 * Called when an SGT is no longer being used by a device. This will not
 * necessarily free the SGT - instead it may stash the SGT.
 */
static void __nvmap_dmabuf_stash_sgt_locked(struct dma_buf_attachment *attach,
                                    enum dma_data_direction dir,
                                    struct sg_table *sgt)
{
        struct nvmap_handle_sgt *nvmap_sgt;
        struct nvmap_handle_info *info = attach->dmabuf->priv;

        pr_debug("Stashing SGT - if necessary.\n");
        list_for_each_entry(nvmap_sgt, &info->maps, maps_entry) {
                if (nvmap_sgt->sgt == sgt) {
                        if (!atomic_sub_and_test(1, &nvmap_sgt->refs))
                                goto done;

                        /*
                         * If we get here, the ref count is zero. Stash the
                         * mapping.
                         */
#ifdef CONFIG_NVMAP_DMABUF_STASH
                        __nvmap_dmabuf_add_stash(nvmap_sgt);
#else
                        __nvmap_dmabuf_free_sgt_locked(nvmap_sgt);
#endif
                        goto done;
                }
        }

done:
        return;
}

/*
 * Checks if there is already a map for this attachment. If so, increment the
 * ref count on said map and return the associated sg_table. Otherwise return
 * NULL.
 *
 * If it turns out there is a map, this also checks to see if the map needs to
 * be removed from the stash - if so, the map is removed.
 */
static struct sg_table *__nvmap_dmabuf_get_sgt_locked(
        struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
        struct nvmap_handle_sgt *nvmap_sgt;
        struct sg_table *sgt = NULL;
        struct nvmap_handle_info *info = attach->dmabuf->priv;

        pr_debug("Getting SGT from stash.\n");
        list_for_each_entry(nvmap_sgt, &info->maps, maps_entry) {
                if (nvmap_masid_mapping(attach) != nvmap_sgt->mapping)
                        continue;

                /* We have a hit. */
                pr_debug("Stash hit (%s)!\n", dev_name(attach->dev));
                sgt = nvmap_sgt->sgt;
                atomic_inc(&nvmap_sgt->refs);
                __nvmap_dmabuf_del_stash(nvmap_sgt);
                stash_stat_inc(all_hits);
                break;
        }

        if (!sgt)
                stash_stat_inc(misses);
        return sgt;
}
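
/*
 * Note on the stats: all_hits counts every reuse of an existing mapping
 * (live or stashed), while hits - incremented in __nvmap_dmabuf_del_stash() -
 * only counts reuses that pulled a mapping back out of the stash. misses
 * counts maps that had to be built from scratch.
 */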

/*
 * If stashing is disabled then the stash-related ops become no-ops.
 */
static struct sg_table *nvmap_dmabuf_map_dma_buf(
        struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
        struct nvmap_handle_info *info = attach->dmabuf->priv;
        int err, ents;
        struct sg_table *sgt;
        DEFINE_DMA_ATTRS(attrs);

        mutex_lock(&info->maps_lock);
        atomic_inc(&info->handle->pin);
        sgt = __nvmap_dmabuf_get_sgt_locked(attach, dir);
        if (sgt)
                goto cache_hit;

        sgt = __nvmap_sg_table(NULL, info->handle);
        if (IS_ERR(sgt)) {
                /* Undo the pin and drop the lock before bailing out. */
                atomic_dec(&info->handle->pin);
                mutex_unlock(&info->maps_lock);
                return sgt;
        }

        if (info->handle->heap_pgalloc && info->handle->alloc) {
                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                ents = dma_map_sg_attrs(attach->dev, sgt->sgl,
                                        sgt->nents, dir, &attrs);
                if (ents <= 0) {
                        err = -ENOMEM;
                        goto err_map;
                }
                BUG_ON(ents != 1);
        } else if (info->handle->alloc) {
                /* carveout has linear map setup. */
                mutex_lock(&info->handle->lock);
                sg_dma_address(sgt->sgl) = info->handle->carveout->base;
                mutex_unlock(&info->handle->lock);
        } else {
                err = -EINVAL;
                goto err_map;
        }

        if (__nvmap_dmabuf_prep_sgt_locked(attach, dir, sgt)) {
                WARN(1, "No mem to prep sgt.\n");
                err = -ENOMEM;
                goto err_prep;
        }

cache_hit:
#ifdef CONFIG_NVMAP_DMABUF_STASH
        BUG_ON(attach->priv && attach->priv != sgt);
#endif
        attach->priv = sgt;
        mutex_unlock(&info->maps_lock);
        return sgt;

err_prep:
        /* Only the sysmem path was actually mapped through the DMA API. */
        if (info->handle->heap_pgalloc)
                dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents,
                                   dir, &attrs);
err_map:
        __nvmap_free_sg_table(NULL, info->handle, sgt);
        atomic_dec(&info->handle->pin);
        mutex_unlock(&info->maps_lock);
        return ERR_PTR(err);
}
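
/*
 * nvmap_dmabuf_map_dma_buf() above has three outcomes: sysmem handles
 * (heap_pgalloc) are mapped through the DMA API with the CPU cache sync
 * skipped, carveout handles just report their fixed physical base, and
 * unallocated handles fail. The BUG_ON(ents != 1) encodes the assumption
 * that the IOMMU merges the whole buffer into one contiguous IOVA chunk.
 */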

static void nvmap_dmabuf_unmap_dma_buf(struct dma_buf_attachment *attach,
                                       struct sg_table *sgt,
                                       enum dma_data_direction dir)
{
        struct nvmap_handle_info *info = attach->dmabuf->priv;

        mutex_lock(&info->maps_lock);
        if (!atomic_add_unless(&info->handle->pin, -1, 0)) {
                mutex_unlock(&info->maps_lock);
                WARN(1, "Unpinning handle that has yet to be pinned!\n");
                return;
        }
        __nvmap_dmabuf_stash_sgt_locked(attach, dir, sgt);
        mutex_unlock(&info->maps_lock);
}

static void nvmap_dmabuf_release(struct dma_buf *dmabuf)
{
        struct nvmap_handle_info *info = dmabuf->priv;
        struct nvmap_handle_sgt *nvmap_sgt;

        mutex_lock(&info->maps_lock);
        while (!list_empty(&info->maps)) {
                nvmap_sgt = list_first_entry(&info->maps,
                                             struct nvmap_handle_sgt,
                                             maps_entry);
                __nvmap_dmabuf_evict_stash(nvmap_sgt);
                __nvmap_dmabuf_free_sgt_locked(nvmap_sgt);
        }
        mutex_unlock(&info->maps_lock);

        pr_debug("%s() 0x%p\n", __func__, info->handle);

        dma_buf_detach(info->handle->dmabuf, info->handle->attachment);
        info->handle->dmabuf = NULL;
        nvmap_handle_put(info->handle);
        kfree(info);
}

static int nvmap_dmabuf_begin_cpu_access(struct dma_buf *dmabuf,
                                          size_t start, size_t len,
                                          enum dma_data_direction dir)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        return __nvmap_cache_maint(NULL, info->handle, start, start + len,
                                   NVMAP_CACHE_OP_INV, 1);
}

static void nvmap_dmabuf_end_cpu_access(struct dma_buf *dmabuf,
                                        size_t start, size_t len,
                                        enum dma_data_direction dir)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        __nvmap_cache_maint(NULL, info->handle, start, start + len,
                            NVMAP_CACHE_OP_WB_INV, 1);
}
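
/*
 * begin_cpu_access invalidates the CPU cache over the accessed range and
 * end_cpu_access writes back and invalidates it, so an importer bracketing
 * CPU accesses with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access()
 * sees coherent data without flushing the whole buffer. (Illustrative
 * importer usage, not shown in this file.)
 */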

static void *nvmap_dmabuf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        pr_debug("%s() 0x%p\n", __func__, info->handle);
        return __nvmap_kmap(info->handle, page_num);
}

static void nvmap_dmabuf_kunmap(struct dma_buf *dmabuf,
                unsigned long page_num, void *addr)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        pr_debug("%s() 0x%p\n", __func__, info->handle);
        __nvmap_kunmap(info->handle, page_num, addr);
}

static void *nvmap_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
                                      unsigned long page_num)
{
        WARN(1, "%s() can't be called from atomic\n", __func__);
        return NULL;
}

static int nvmap_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        return __nvmap_map(info->handle, vma);
}

static void *nvmap_dmabuf_vmap(struct dma_buf *dmabuf)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        pr_debug("%s() 0x%p\n", __func__, info->handle);
        return __nvmap_mmap(info->handle);
}

static void nvmap_dmabuf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        struct nvmap_handle_info *info = dmabuf->priv;

        pr_debug("%s() 0x%p\n", __func__, info->handle);
        __nvmap_munmap(info->handle, vaddr);
}

static struct dma_buf_ops nvmap_dma_buf_ops = {
        .attach         = nvmap_dmabuf_attach,
        .detach         = nvmap_dmabuf_detach,
        .map_dma_buf    = nvmap_dmabuf_map_dma_buf,
        .unmap_dma_buf  = nvmap_dmabuf_unmap_dma_buf,
        .release        = nvmap_dmabuf_release,
        .begin_cpu_access = nvmap_dmabuf_begin_cpu_access,
        .end_cpu_access = nvmap_dmabuf_end_cpu_access,
        .kmap_atomic    = nvmap_dmabuf_kmap_atomic,
        .kmap           = nvmap_dmabuf_kmap,
        .kunmap         = nvmap_dmabuf_kunmap,
        .mmap           = nvmap_dmabuf_mmap,
        .vmap           = nvmap_dmabuf_vmap,
        .vunmap         = nvmap_dmabuf_vunmap,
};

/*
 * Make a dmabuf object for an nvmap handle.
 */
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
                                    struct nvmap_handle *handle)
{
        int err;
        struct dma_buf *dmabuf;
        struct nvmap_handle_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                err = -ENOMEM;
                goto err_nomem;
        }
        info->handle = handle;
        INIT_LIST_HEAD(&info->maps);
        mutex_init(&info->maps_lock);

        dmabuf = dma_buf_export(info, &nvmap_dma_buf_ops, handle->size,
                                O_RDWR);
        if (IS_ERR(dmabuf)) {
                err = PTR_ERR(dmabuf);
                goto err_export;
        }
        nvmap_handle_get(handle);
        pr_debug("%s() 0x%p => 0x%p\n", __func__, info->handle, dmabuf);
        return dmabuf;

err_export:
        kfree(info);
err_nomem:
        return ERR_PTR(err);
}
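
/*
 * __nvmap_make_dmabuf() takes a reference on the handle (nvmap_handle_get)
 * that is only dropped in nvmap_dmabuf_release(), so the handle cannot
 * disappear while any dma_buf reference is outstanding.
 */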

int nvmap_get_dmabuf_fd(struct nvmap_client *client, ulong id)
{
        int fd;
        struct dma_buf *dmabuf;

        dmabuf = __nvmap_dmabuf_export(client, id);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                goto err_out;
        return fd;

err_out:
        dma_buf_put(dmabuf);
        return fd;
}

struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
                                 unsigned long id)
{
        struct nvmap_handle *handle;
        struct dma_buf *buf;

        handle = nvmap_get_handle_id(client, id);
        if (!handle)
                return ERR_PTR(-EINVAL);
        buf = handle->dmabuf;
        if (WARN(!buf, "Attempting to get a freed dma_buf!\n")) {
                nvmap_handle_put(handle);
                return ERR_PTR(-EINVAL);
        }

        get_dma_buf(buf);

        /*
         * Don't want to take out refs on the handle here.
         */
        nvmap_handle_put(handle);

        return buf;
}
EXPORT_SYMBOL(__nvmap_dmabuf_export);

/*
 * Increments ref count on the dma_buf. You are responsible for calling
 * dma_buf_put() on the returned dma_buf object.
 */
struct dma_buf *nvmap_dmabuf_export(struct nvmap_client *client,
                                 unsigned long user_id)
{
        return __nvmap_dmabuf_export(client, unmarshal_user_id(user_id));
}

/*
 * Similar to nvmap_dmabuf_export(), only it uses a handle ref to get the buf
 * instead of a user_id. You must dma_buf_put() the dma_buf object when you
 * are done with it.
 */
struct dma_buf *nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref)
{
        if (!virt_addr_valid(ref))
                return ERR_PTR(-EINVAL);

        get_dma_buf(ref->handle->dmabuf);
        return ref->handle->dmabuf;
}
EXPORT_SYMBOL(nvmap_dmabuf_export_from_ref);

/*
 * Returns the nvmap handle ID associated with the passed dma_buf's fd. This
 * does not affect the ref count of the dma_buf.
 */
ulong nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client, int fd)
{
        ulong id = -EINVAL;
        struct dma_buf *dmabuf;
        struct nvmap_handle_info *info;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);
        if (dmabuf->ops == &nvmap_dma_buf_ops) {
                info = dmabuf->priv;
                id = (ulong) info->handle;
        }
        dma_buf_put(dmabuf);
        return id;
}
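
/*
 * The ops comparison above is what guarantees the fd really refers to an
 * nvmap export; only then is dmabuf->priv known to be a struct
 * nvmap_handle_info and safe to dereference for the handle ID.
 */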

int nvmap_ioctl_share_dmabuf(struct file *filp, void __user *arg)
{
        struct nvmap_create_handle op;
        struct nvmap_client *client = filp->private_data;
        ulong handle;

        BUG_ON(!client);

        if (copy_from_user(&op, (void __user *)arg, sizeof(op)))
                return -EFAULT;

        handle = unmarshal_user_id(op.id);
        if (!handle)
                return -EINVAL;

        op.fd = nvmap_get_dmabuf_fd(client, handle);
        if (op.fd < 0)
                return op.fd;

        if (copy_to_user((void __user *)arg, &op, sizeof(op))) {
                sys_close(op.fd);
                return -EFAULT;
        }
        return 0;
}
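
/*
 * If copying the result back to userspace fails, the freshly installed fd is
 * closed with sys_close() so the dma_buf reference is not leaked to a caller
 * that never learned the fd number.
 */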

int nvmap_get_dmabuf_param(struct dma_buf *dmabuf, u32 param, u64 *result)
{
        struct nvmap_handle_info *info;

        if (WARN_ON(!virt_addr_valid(dmabuf)))
                return -EINVAL;

        info = dmabuf->priv;
        return __nvmap_get_handle_param(NULL, info->handle, param, result);
}

struct sg_table *nvmap_dmabuf_sg_table(struct dma_buf *dmabuf)
{
        struct nvmap_handle_info *info;

        if (WARN_ON(!virt_addr_valid(dmabuf)))
                return ERR_PTR(-EINVAL);

        info = dmabuf->priv;
        return __nvmap_sg_table(NULL, info->handle);
}

void nvmap_dmabuf_free_sg_table(struct dma_buf *dmabuf, struct sg_table *sgt)
{
        if (WARN_ON(!virt_addr_valid(sgt)))
                return;

        __nvmap_free_sg_table(NULL, NULL, sgt);
}

void nvmap_set_dmabuf_private(struct dma_buf *dmabuf, void *priv,
                void (*delete)(void *priv))
{
        struct nvmap_handle_info *info;

        if (WARN_ON(!virt_addr_valid(dmabuf)))
                return;

        info = dmabuf->priv;
        info->handle->nvhost_priv = priv;
        info->handle->nvhost_priv_delete = delete;
}

void *nvmap_get_dmabuf_private(struct dma_buf *dmabuf)
{
        void *priv;
        struct nvmap_handle_info *info;

        if (WARN_ON(!virt_addr_valid(dmabuf)))
                return ERR_PTR(-EINVAL);

        info = dmabuf->priv;
        priv = info->handle->nvhost_priv;
        return priv;
}

ulong nvmap_dmabuf_to_user_id(struct dma_buf *dmabuf)
{
        struct nvmap_handle_info *info;

        if (!virt_addr_valid(dmabuf))
                return 0;

        info = dmabuf->priv;
        return (ulong)marshal_kernel_handle((ulong)info->handle);
}

#define NVMAP_DMABUF_WO_TRIGGER_NODE(trigger, name)                     \
        DEFINE_SIMPLE_ATTRIBUTE(__nvmap_dmabuf_##name##_fops, NULL,     \
                                trigger, "%llu");
#define NVMAP_DMABUF_WO_DEBUGFS(name, root)                             \
        do {                                                            \
                if (!debugfs_create_file(__stringify(name), S_IWUSR, root, \
                                         NULL, &__nvmap_dmabuf_##name##_fops))\
                        return;                                         \
        } while (0)

#ifdef CONFIG_NVMAP_DMABUF_STASH_STATS

/*
 * Clear the stash stats counters.
 */
static int __nvmap_dmabuf_clear_stash_stats(void *data, u64 val)
{
        spin_lock(&nvmap_stat_lock);
        nvmap_stash_stats.hits = 0;
        nvmap_stash_stats.all_hits = 0;
        nvmap_stash_stats.misses = 0;
        nvmap_stash_stats.evictions = 0;
        spin_unlock(&nvmap_stat_lock);
        return 0;
}
NVMAP_DMABUF_WO_TRIGGER_NODE(__nvmap_dmabuf_clear_stash_stats, clear_stats);
#endif

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root)
{
        struct dentry *dmabuf_root;

        if (!nvmap_root)
                return;

        dmabuf_root = debugfs_create_dir("dmabuf", nvmap_root);
        if (!dmabuf_root)
                return;

#if defined(CONFIG_NVMAP_DMABUF_STASH_STATS)
#define CACHE_STAT(root, stat)                                          \
        do {                                                            \
                if (!debugfs_create_u64(__stringify(stat), S_IRUGO,     \
                                        root, &nvmap_stash_stats.stat)) \
                        return;                                         \
        } while (0)

        CACHE_STAT(dmabuf_root, hits);
        CACHE_STAT(dmabuf_root, all_hits);
        CACHE_STAT(dmabuf_root, misses);
        CACHE_STAT(dmabuf_root, evictions);
        CACHE_STAT(dmabuf_root, stashed_iova);
        CACHE_STAT(dmabuf_root, stashed_maps);
        NVMAP_DMABUF_WO_DEBUGFS(clear_stats, dmabuf_root);
#endif
}
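
/*
 * With CONFIG_NVMAP_DMABUF_STASH_STATS enabled this creates (assuming the
 * nvmap debugfs root sits at the usual /sys/kernel/debug/nvmap):
 *
 *      /sys/kernel/debug/nvmap/dmabuf/hits
 *      /sys/kernel/debug/nvmap/dmabuf/all_hits
 *      /sys/kernel/debug/nvmap/dmabuf/misses
 *      /sys/kernel/debug/nvmap/dmabuf/evictions
 *      /sys/kernel/debug/nvmap/dmabuf/stashed_iova
 *      /sys/kernel/debug/nvmap/dmabuf/stashed_maps
 *      /sys/kernel/debug/nvmap/dmabuf/clear_stats   (write-only trigger)
 */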