1 /*
2  * drivers/video/tegra/nvmap/nvmap_dev.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/backing-dev.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/kernel.h>
29 #include <linux/device.h>
30 #include <linux/oom.h>
31 #include <linux/platform_device.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/nvmap.h>
38 #include <linux/module.h>
39 #include <linux/resource.h>
40 #include <linux/security.h>
41 #include <linux/stat.h>
42 #include <linux/kthread.h>
43
44 #include <asm/cputype.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/nvmap.h>
48
49 #include "nvmap_priv.h"
50 #include "nvmap_ioctl.h"
51
52 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
53
54 /* roughly the inner (L2) cache size: above this, a full flush by set/ways is cheaper */
55 #ifdef CONFIG_DENVER_CPU
56 size_t cache_maint_inner_threshold = SZ_2M * 8;
57 #else
58 size_t cache_maint_inner_threshold = SZ_2M;
59 #endif
60
61 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
62 size_t cache_maint_outer_threshold = SZ_1M;
63 #endif
64
65 struct nvmap_carveout_node {
66         unsigned int            heap_bit;
67         struct nvmap_heap       *carveout;
68         int                     index;
69         struct list_head        clients;
70         spinlock_t              clients_lock;
71         phys_addr_t                     base;
72         size_t                  size;
73 };
74
75 struct nvmap_device *nvmap_dev;
76 struct nvmap_stats nvmap_stats;
77
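/*
 * nvmap buffers are device memory rather than page-cache pages: readahead
 * is disabled and the pages are exempt from dirty accounting and writeback,
 * while read/write mmap() of the device is still allowed.
 */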
78 static struct backing_dev_info nvmap_bdi = {
79         .ra_pages       = 0,
80         .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
81                            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
82 };
83
84 static struct device_dma_parameters nvmap_dma_parameters = {
85         .max_segment_size = UINT_MAX,
86 };
87
88 static int nvmap_open(struct inode *inode, struct file *filp);
89 static int nvmap_release(struct inode *inode, struct file *filp);
90 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
91 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
92 static void nvmap_vma_close(struct vm_area_struct *vma);
93 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
94
95 static const struct file_operations nvmap_user_fops = {
96         .owner          = THIS_MODULE,
97         .open           = nvmap_open,
98         .release        = nvmap_release,
99         .unlocked_ioctl = nvmap_ioctl,
100 #ifdef CONFIG_COMPAT
101         .compat_ioctl = nvmap_ioctl,
102 #endif
103         .mmap           = nvmap_map,
104 };
105
106 static struct vm_operations_struct nvmap_vma_ops = {
107         .open           = nvmap_vma_open,
108         .close          = nvmap_vma_close,
109         .fault          = nvmap_vma_fault,
110 };
111
112 int is_nvmap_vma(struct vm_area_struct *vma)
113 {
114         return vma->vm_ops == &nvmap_vma_ops;
115 }
116
117 /*
118  * Looks up the client's reference to the given handle and returns it, or
119  * NULL if the client holds no reference to that handle.
120  *
121  * Note: to call this function make sure you own the client ref lock.
122  */
123 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
124                                                  struct nvmap_handle *h)
125 {
126         struct rb_node *n = c->handle_refs.rb_node;
127
128         while (n) {
129                 struct nvmap_handle_ref *ref;
130                 ref = rb_entry(n, struct nvmap_handle_ref, node);
131                 if (ref->handle == h)
132                         return ref;
133                 else if ((uintptr_t)h > (uintptr_t)ref->handle)
134                         n = n->rb_right;
135                 else
136                         n = n->rb_left;
137         }
138
139         return NULL;
140 }
141
142 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
143                                    struct nvmap_heap_block *b)
144 {
145         struct nvmap_heap *h = nvmap_block_to_heap(b);
146         struct nvmap_carveout_node *n;
147         int i;
148
149         for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
150                 n = &nvmap_dev->heaps[i];
151                 if (n->carveout == h)
152                         return n->heap_bit;
153         }
154         return 0;
155 }
156
157 /*
158  * This routine flushes carveout memory from the cache.
159  *
160  * Why is a cache flush needed for carveout memory? Consider a piece of
161  * carveout that is allocated as cached and then released. If the same
162  * memory is later allocated for an uncached request without being flushed
163  * from the cache, the client might pass it to a H/W engine, which starts
164  * modifying it. Because the memory was cached earlier, some of it may
165  * still sit in the cache; when the CPU later evicts those lines (e.g.
166  * while reading or writing other memory), they are written back to main
167  * memory and corrupt data the H/W engine has already written there.
168  *
169  * Flushing the memory blindly on every carveout allocation would be
170  * redundant, so the following strategy is used to optimize carveout
171  * buffer cache flushes:
172  *
173  * The whole carveout is flushed from the cache during its initialization.
174  * During allocation, carveout buffers are not flushed from the cache.
175  * During deallocation, carveout buffers are flushed if they were allocated
176  * as cached; if they were allocated as uncached/write-combined, no cache
177  * flush is needed and just draining the store buffers is enough.
178  */
179 int nvmap_flush_heap_block(struct nvmap_client *client,
180         struct nvmap_heap_block *block, size_t len, unsigned int prot)
181 {
182         ulong kaddr;
183         phys_addr_t phys = block->base;
184         phys_addr_t end = block->base + len;
185         struct vm_struct *area = NULL;
186
187         if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
188                 goto out;
189
190 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
191         if (len >= cache_maint_inner_threshold) {
192                 inner_flush_cache_all();
193                 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
194                         outer_flush_range(block->base, block->base + len);
195                 goto out;
196         }
197 #endif
198
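        /*
         * Below the set/ways threshold (or when that path is not compiled
         * in): flush by virtual address, one page at a time, through a
         * temporary kernel mapping of the physical range.
         */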
199         area = alloc_vm_area(PAGE_SIZE, NULL);
200         if (!area)
201                 return -ENOMEM;
202
203         kaddr = (ulong)area->addr;
204
205         while (phys < end) {
206                 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
207                 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
208
209                 next = min(next, end);
210                 ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
211                         phys, PG_PROT_KERNEL);
212                 FLUSH_DCACHE_AREA(base, next - phys);
213                 phys = next;
214                 unmap_kernel_range(kaddr, PAGE_SIZE);
215         }
216
217         if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
218                 outer_flush_range(block->base, block->base + len);
219
220         free_vm_area(area);
221 out:
222         wmb();
223         return 0;
224 }
225
226 void nvmap_carveout_commit_add(struct nvmap_client *client,
227                                struct nvmap_carveout_node *node,
228                                size_t len)
229 {
230         spin_lock(&node->clients_lock);
231         BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
232                client->carveout_commit[node->index].commit != 0);
233
234         client->carveout_commit[node->index].commit += len;
235         /* if this client isn't already on the list of nodes for this heap,
236            add it */
237         if (list_empty(&client->carveout_commit[node->index].list)) {
238                 list_add(&client->carveout_commit[node->index].list,
239                          &node->clients);
240         }
241         spin_unlock(&node->clients_lock);
242 }
243
244 void nvmap_carveout_commit_subtract(struct nvmap_client *client,
245                                     struct nvmap_carveout_node *node,
246                                     size_t len)
247 {
248         if (!client)
249                 return;
250
251         spin_lock(&node->clients_lock);
252         BUG_ON(client->carveout_commit[node->index].commit < len);
253         client->carveout_commit[node->index].commit -= len;
254         /* if no more allocation in this carveout for this node, delete it */
255         if (!client->carveout_commit[node->index].commit)
256                 list_del_init(&client->carveout_commit[node->index].list);
257         spin_unlock(&node->clients_lock);
258 }
259
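/*
 * struct nvmap_client embeds a carveout_commit[] array at its tail (see the
 * allocation in __nvmap_create_client()). Given the commit entry belonging
 * to 'node', step back node->index entries to reach carveout_commit[0] and
 * use offsetof() to recover the enclosing client.
 */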
260 static struct nvmap_client *get_client_from_carveout_commit(
261         struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
262 {
263         struct nvmap_carveout_commit *first_commit = commit - node->index;
264         return (void *)first_commit - offsetof(struct nvmap_client,
265                                                carveout_commit);
266 }
267
268 static
269 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
270                                               struct nvmap_handle *handle,
271                                               unsigned long type)
272 {
273         struct nvmap_carveout_node *co_heap;
274         struct nvmap_device *dev = nvmap_dev;
275         int i;
276
277         for (i = 0; i < dev->nr_carveouts; i++) {
278                 struct nvmap_heap_block *block;
279                 co_heap = &dev->heaps[i];
280
281                 if (!(co_heap->heap_bit & type))
282                         continue;
283
284                 block = nvmap_heap_alloc(co_heap->carveout, handle);
285                 if (block)
286                         return block;
287         }
288         return NULL;
289 }
290
291 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
292                                               struct nvmap_handle *handle,
293                                               unsigned long type)
294 {
295         return do_nvmap_carveout_alloc(client, handle, type);
296 }
297
298 /* remove a handle from the device's tree of all handles; called
299  * when freeing handles. */
300 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
301 {
302         spin_lock(&dev->handle_lock);
303
304         /* re-test inside the spinlock if the handle really has no clients;
305          * only remove the handle if it is unreferenced */
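        /* atomic_add_return(0, ...) reads the refcount with full-barrier semantics */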
306         if (atomic_add_return(0, &h->ref) > 0) {
307                 spin_unlock(&dev->handle_lock);
308                 return -EBUSY;
309         }
310         smp_rmb();
311         BUG_ON(atomic_read(&h->ref) < 0);
312         BUG_ON(atomic_read(&h->pin) != 0);
313
314         rb_erase(&h->node, &dev->handles);
315
316         spin_unlock(&dev->handle_lock);
317         return 0;
318 }
319
320 /* adds a newly-created handle to the device master tree */
321 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
322 {
323         struct rb_node **p;
324         struct rb_node *parent = NULL;
325
326         spin_lock(&dev->handle_lock);
327         p = &dev->handles.rb_node;
328         while (*p) {
329                 struct nvmap_handle *b;
330
331                 parent = *p;
332                 b = rb_entry(parent, struct nvmap_handle, node);
333                 if (h > b)
334                         p = &parent->rb_right;
335                 else
336                         p = &parent->rb_left;
337         }
338         rb_link_node(&h->node, parent, p);
339         rb_insert_color(&h->node, &dev->handles);
340         spin_unlock(&dev->handle_lock);
341 }
342
343 /* Validates that a handle is in the device master tree and that the
344  * client has permission to access it. */
345 struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
346 {
347         struct nvmap_handle *h = NULL;
348         struct rb_node *n;
349
350         spin_lock(&nvmap_dev->handle_lock);
351
352         n = nvmap_dev->handles.rb_node;
353
354         while (n) {
355                 h = rb_entry(n, struct nvmap_handle, node);
356                 if (h == id) {
357                         h = nvmap_handle_get(h);
358                         spin_unlock(&nvmap_dev->handle_lock);
359                         return h;
360                 }
361                 if (id > h)
362                         n = n->rb_right;
363                 else
364                         n = n->rb_left;
365         }
366         spin_unlock(&nvmap_dev->handle_lock);
367         return NULL;
368 }
369
370 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
371                                            const char *name)
372 {
373         struct nvmap_client *client;
374         struct task_struct *task;
375         int i;
376
377         if (WARN_ON(!dev))
378                 return NULL;
379
380         client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
381                          * dev->nr_carveouts), GFP_KERNEL);
382         if (!client)
383                 return NULL;
384
385         client->name = name;
386         client->kernel_client = true;
387         client->handle_refs = RB_ROOT;
388
389         for (i = 0; i < dev->nr_carveouts; i++) {
390                 INIT_LIST_HEAD(&client->carveout_commit[i].list);
391                 client->carveout_commit[i].commit = 0;
392         }
393
394         get_task_struct(current->group_leader);
395         task_lock(current->group_leader);
396         /* don't bother to store task struct for kernel threads,
397            they can't be killed anyway */
398         if (current->flags & PF_KTHREAD) {
399                 put_task_struct(current->group_leader);
400                 task = NULL;
401         } else {
402                 task = current->group_leader;
403         }
404         task_unlock(current->group_leader);
405         client->task = task;
406
407         mutex_init(&client->ref_lock);
408         atomic_set(&client->count, 1);
409
410         spin_lock(&dev->clients_lock);
411         list_add(&client->list, &dev->clients);
412         spin_unlock(&dev->clients_lock);
413         return client;
414 }
415
416 static void destroy_client(struct nvmap_client *client)
417 {
418         struct rb_node *n;
419         int i;
420
421         if (!client)
422                 return;
423
424         spin_lock(&nvmap_dev->clients_lock);
425         list_del(&client->list);
426         spin_unlock(&nvmap_dev->clients_lock);
427
428         while ((n = rb_first(&client->handle_refs))) {
429                 struct nvmap_handle_ref *ref;
430                 int pins, dupes;
431
432                 ref = rb_entry(n, struct nvmap_handle_ref, node);
433
434                 smp_rmb();
435                 pins = atomic_read(&ref->pin);
436
437                 while (pins--)
438                         __nvmap_unpin(ref);
439
440                 if (ref->handle->owner == client)
441                         ref->handle->owner = NULL;
442
443                 dma_buf_put(ref->handle->dmabuf);
444                 rb_erase(&ref->node, &client->handle_refs);
445                 atomic_dec(&ref->handle->share_count);
446
447                 dupes = atomic_read(&ref->dupes);
448                 while (dupes--)
449                         nvmap_handle_put(ref->handle);
450
451                 kfree(ref);
452         }
453
454         for (i = 0; i < nvmap_dev->nr_carveouts; i++)
455                 list_del(&client->carveout_commit[i].list);
456
457         if (client->task)
458                 put_task_struct(client->task);
459
460         kfree(client);
461 }
462
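/*
 * Take a reference on a client unless its refcount has already dropped to
 * zero (i.e. the client is being destroyed); atomic_add_unless() makes the
 * check and the increment atomic, so a dying client is never resurrected.
 */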
463 struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
464 {
465         if (!virt_addr_valid(client))
466                 return NULL;
467
468         if (!atomic_add_unless(&client->count, 1, 0))
469                 return NULL;
470
471         return client;
472 }
473
474 void nvmap_client_put(struct nvmap_client *client)
475 {
476         if (!client)
477                 return;
478
479         if (!atomic_dec_return(&client->count))
480                 destroy_client(client);
481 }
482
483 static int nvmap_open(struct inode *inode, struct file *filp)
484 {
485         struct miscdevice *miscdev = filp->private_data;
486         struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
487         struct nvmap_client *priv;
488         int ret;
489         __attribute__((unused)) struct rlimit old_rlim, new_rlim;
490
491         ret = nonseekable_open(inode, filp);
492         if (unlikely(ret))
493                 return ret;
494
495         BUG_ON(dev != nvmap_dev);
496         priv = __nvmap_create_client(dev, "user");
497         if (!priv)
498                 return -ENOMEM;
499         trace_nvmap_open(priv, priv->name);
500
501         priv->kernel_client = false;
502
503         filp->f_mapping->backing_dev_info = &nvmap_bdi;
504
505         filp->private_data = priv;
506         return 0;
507 }
508
509 static int nvmap_release(struct inode *inode, struct file *filp)
510 {
511         struct nvmap_client *priv = filp->private_data;
512
513         trace_nvmap_release(priv, priv->name);
514         nvmap_client_put(priv);
515         return 0;
516 }
517
518 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
519 {
520         struct nvmap_vma_priv *priv;
521
522         h = nvmap_handle_get(h);
523         if (!h)
524                 return -EINVAL;
525
526         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
527         if (!priv)
528                 return -ENOMEM;
529         priv->handle = h;
530
531         vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
532                           VM_DONTDUMP | VM_DONTCOPY |
533                           (h->heap_pgalloc ? 0 : VM_PFNMAP);
534         vma->vm_ops = &nvmap_vma_ops;
535         BUG_ON(vma->vm_private_data != NULL);
536         vma->vm_private_data = priv;
537         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
538         nvmap_vma_open(vma);
539         return 0;
540 }
541
542 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
543 {
544         BUG_ON(vma->vm_private_data != NULL);
545         vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
546                           VM_DONTDUMP | VM_DONTCOPY);
547         vma->vm_ops = &nvmap_vma_ops;
548         return 0;
549 }
550
551 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
552 {
553         int err = 0;
554         void __user *uarg = (void __user *)arg;
555
556         if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
557                 return -ENOTTY;
558
559         if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
560                 return -ENOTTY;
561
562         if (_IOC_DIR(cmd) & _IOC_READ)
563                 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
564         if (_IOC_DIR(cmd) & _IOC_WRITE)
565                 err = err || !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
566
567         if (err)
568                 return -EFAULT;
569
570         switch (cmd) {
571         case NVMAP_IOC_CREATE:
572         case NVMAP_IOC_FROM_ID:
573         case NVMAP_IOC_FROM_FD:
574                 err = nvmap_ioctl_create(filp, cmd, uarg);
575                 break;
576
577         case NVMAP_IOC_GET_ID:
578                 err = nvmap_ioctl_getid(filp, uarg);
579                 break;
580
581         case NVMAP_IOC_GET_FD:
582                 err = nvmap_ioctl_getfd(filp, uarg);
583                 break;
584
585 #ifdef CONFIG_COMPAT
586         case NVMAP_IOC_PARAM_32:
587                 err = nvmap_ioctl_get_param(filp, uarg, true);
588                 break;
589 #endif
590
591         case NVMAP_IOC_PARAM:
592                 err = nvmap_ioctl_get_param(filp, uarg, false);
593                 break;
594
595 #ifdef CONFIG_COMPAT
596         case NVMAP_IOC_UNPIN_MULT_32:
597         case NVMAP_IOC_PIN_MULT_32:
598                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
599                         uarg, true);
600                 break;
601 #endif
602
603         case NVMAP_IOC_UNPIN_MULT:
604         case NVMAP_IOC_PIN_MULT:
605                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
606                         uarg, false);
607                 break;
608
609         case NVMAP_IOC_ALLOC:
610                 err = nvmap_ioctl_alloc(filp, uarg);
611                 break;
612
613         case NVMAP_IOC_ALLOC_KIND:
614                 err = nvmap_ioctl_alloc_kind(filp, uarg);
615                 break;
616
617         case NVMAP_IOC_FREE:
618                 err = nvmap_ioctl_free(filp, arg);
619                 break;
620
621 #ifdef CONFIG_COMPAT
622         case NVMAP_IOC_MMAP_32:
623                 err = nvmap_map_into_caller_ptr(filp, uarg, true);
624                 break;
625 #endif
626
627         case NVMAP_IOC_MMAP:
628                 err = nvmap_map_into_caller_ptr(filp, uarg, false);
629                 break;
630
631 #ifdef CONFIG_COMPAT
632         case NVMAP_IOC_WRITE_32:
633         case NVMAP_IOC_READ_32:
634                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
635                         uarg, true);
636                 break;
637 #endif
638
639         case NVMAP_IOC_WRITE:
640         case NVMAP_IOC_READ:
641                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
642                         false);
643                 break;
644
645 #ifdef CONFIG_COMPAT
646         case NVMAP_IOC_CACHE_32:
647                 err = nvmap_ioctl_cache_maint(filp, uarg, true);
648                 break;
649 #endif
650
651         case NVMAP_IOC_CACHE:
652                 err = nvmap_ioctl_cache_maint(filp, uarg, false);
653                 break;
654
655         case NVMAP_IOC_CACHE_LIST:
656         case NVMAP_IOC_RESERVE:
657                 err = nvmap_ioctl_cache_maint_list(filp, uarg,
658                                                    cmd == NVMAP_IOC_RESERVE);
659                 break;
660
661         case NVMAP_IOC_SHARE:
662                 err = nvmap_ioctl_share_dmabuf(filp, uarg);
663                 break;
664
665         default:
666                 return -ENOTTY;
667         }
668         return err;
669 }
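/*
 * Illustrative flow (a sketch, not a spec): a user-space client opens
 * /dev/nvmap, creates a handle with NVMAP_IOC_CREATE, backs it with memory
 * via NVMAP_IOC_ALLOC and maps it with NVMAP_IOC_MMAP; NVMAP_IOC_GET_FD and
 * NVMAP_IOC_SHARE export the buffer as a dma-buf fd, NVMAP_IOC_CACHE
 * requests cache maintenance and NVMAP_IOC_FREE releases the handle. The
 * argument structures are defined in the nvmap headers included above.
 */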
670
671 /* to ensure that the backing store for the VMA isn't freed while a fork'd
672  * reference still exists, nvmap_vma_open increments the reference count on
673  * the handle, and nvmap_vma_close decrements it. alternatively, we could
674  * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
675 */
676 void nvmap_vma_open(struct vm_area_struct *vma)
677 {
678         struct nvmap_vma_priv *priv;
679         struct nvmap_handle *h;
680         struct nvmap_vma_list *vma_list, *tmp;
681
682         priv = vma->vm_private_data;
683         BUG_ON(!priv);
684         BUG_ON(!priv->handle);
685
686         atomic_inc(&priv->count);
687         h = priv->handle;
688
689         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
690         if (vma_list) {
691                 mutex_lock(&h->lock);
692                 list_for_each_entry(tmp, &h->vmas, list)
693                         BUG_ON(tmp->vma == vma);
694
695                 vma_list->vma = vma;
696                 list_add(&vma_list->list, &h->vmas);
697                 mutex_unlock(&h->lock);
698         } else {
699                 WARN(1, "vma not tracked");
700         }
701 }
702
703 static void nvmap_vma_close(struct vm_area_struct *vma)
704 {
705         struct nvmap_vma_priv *priv = vma->vm_private_data;
706         struct nvmap_vma_list *vma_list;
707         struct nvmap_handle *h;
708         bool vma_found = false;
709
710         if (!priv)
711                 return;
712
713         BUG_ON(!priv->handle);
714
715         h = priv->handle;
716         mutex_lock(&h->lock);
717         list_for_each_entry(vma_list, &h->vmas, list) {
718                 if (vma_list->vma != vma)
719                         continue;
720                 list_del(&vma_list->list);
721                 kfree(vma_list);
722                 vma_found = true;
723                 break;
724         }
725         BUG_ON(!vma_found);
726         mutex_unlock(&h->lock);
727
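        /*
         * __atomic_add_unless() returns the value before the decrement and
         * never goes below zero, so only the caller that observes 1 here
         * (the last mapping) drops the handle reference and frees priv.
         */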
728         if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
729                 if (priv->handle)
730                         nvmap_handle_put(priv->handle);
731                 vma->vm_private_data = NULL;
732                 kfree(priv);
733         }
734 }
735
736 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
737 {
738         struct page *page;
739         struct nvmap_vma_priv *priv;
740         unsigned long offs;
741
742         offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
743         priv = vma->vm_private_data;
744         if (!priv || !priv->handle || !priv->handle->alloc)
745                 return VM_FAULT_SIGBUS;
746
747         offs += priv->offs;
748         /* if the VMA was split for some reason, vm_pgoff will be the VMA's
749          * offset from the original VMA */
750         offs += (vma->vm_pgoff << PAGE_SHIFT);
751
752         if (offs >= priv->handle->size)
753                 return VM_FAULT_SIGBUS;
754
755         if (!priv->handle->heap_pgalloc) {
756                 unsigned long pfn;
757                 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
758                 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
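                /*
                 * Carveout memory that lies outside the kernel's memory map
                 * has no struct page, so insert the raw PFN directly;
                 * pfn_valid() pages (e.g. CMA-backed carveouts) fall through
                 * to the normal struct-page path below.
                 */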
759                 if (!pfn_valid(pfn)) {
760                         vm_insert_pfn(vma,
761                                 (unsigned long)vmf->virtual_address, pfn);
762                         return VM_FAULT_NOPAGE;
763                 }
764                 /* CMA memory would get here */
765                 page = pfn_to_page(pfn);
766         } else {
767                 offs >>= PAGE_SHIFT;
768                 if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
769                         return VM_FAULT_SIGBUS;
770                 page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
771                 nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
772         }
773
774         if (page)
775                 get_page(page);
776         vmf->page = page;
777         return (page) ? 0 : VM_FAULT_SIGBUS;
778 }
779
780 #define DEBUGFS_OPEN_FOPS(name) \
781 static int nvmap_debug_##name##_open(struct inode *inode, \
782                                             struct file *file) \
783 { \
784         return single_open(file, nvmap_debug_##name##_show, \
785                             inode->i_private); \
786 } \
787 \
788 static const struct file_operations debug_##name##_fops = { \
789         .open = nvmap_debug_##name##_open, \
790         .read = seq_read, \
791         .llseek = seq_lseek, \
792         .release = single_release, \
793 }
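/*
 * Usage sketch: DEBUGFS_OPEN_FOPS(foo) pairs an nvmap_debug_foo_show()
 * routine with a generated debug_foo_fops, which nvmap_probe() then hands to
 * debugfs_create_file(). "foo" is a placeholder; the instantiations below
 * are allocations, clients, iovmm_clients, iovmm_allocations and
 * iovmm_procrank.
 */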
794
795 #define K(x) ((x) >> 10)
796
797 static void client_stringify(struct nvmap_client *client, struct seq_file *s)
798 {
799         char task_comm[TASK_COMM_LEN];
800         if (!client->task) {
801                 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
802                 return;
803         }
804         get_task_comm(task_comm, client->task);
805         seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
806                    client->task->pid);
807 }
808
809 static void allocations_stringify(struct nvmap_client *client,
810                                   struct seq_file *s, bool iovmm)
811 {
812         struct rb_node *n;
813
814         nvmap_ref_lock(client);
815         n = rb_first(&client->handle_refs);
816         for (; n != NULL; n = rb_next(n)) {
817                 struct nvmap_handle_ref *ref =
818                         rb_entry(n, struct nvmap_handle_ref, node);
819                 struct nvmap_handle *handle = ref->handle;
820                 if (handle->alloc && handle->heap_pgalloc == iovmm) {
821                         phys_addr_t base = iovmm ? 0 :
822                                            (handle->carveout->base);
823                         seq_printf(s,
824                                 "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %8p\n",
825                                 "", "",
826                                 (unsigned long long)base, K(handle->size),
827                                 handle->userflags,
828                                 atomic_read(&handle->ref),
829                                 atomic_read(&ref->dupes),
830                                 atomic_read(&ref->pin),
831                                 atomic_read(&handle->share_count),
832                                 handle);
833                 }
834         }
835         nvmap_ref_unlock(client);
836 }
837
838 static void nvmap_get_client_mss(struct nvmap_client *client,
839                                  u64 *total, bool iovmm)
840 {
841         struct rb_node *n;
842
843         *total = 0;
844         nvmap_ref_lock(client);
845         n = rb_first(&client->handle_refs);
846         for (; n != NULL; n = rb_next(n)) {
847                 struct nvmap_handle_ref *ref =
848                         rb_entry(n, struct nvmap_handle_ref, node);
849                 struct nvmap_handle *handle = ref->handle;
850                 if (handle->alloc && handle->heap_pgalloc == iovmm)
851                         *total += handle->size /
852                                   atomic_read(&handle->share_count);
853         }
854         nvmap_ref_unlock(client);
855 }
856
857 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
858 {
859         struct nvmap_carveout_node *node = s->private;
860         struct nvmap_carveout_commit *commit;
861         unsigned int total = 0;
862
863         spin_lock(&node->clients_lock);
864         seq_printf(s, "%-18s %18s %8s %11s\n",
865                 "CLIENT", "PROCESS", "PID", "SIZE");
866         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %8s\n",
867                         "", "", "BASE", "SIZE", "FLAGS", "REFS",
868                         "DUPES", "PINS", "SHARE", "UID");
869         list_for_each_entry(commit, &node->clients, list) {
870                 struct nvmap_client *client =
871                         get_client_from_carveout_commit(node, commit);
872                 client_stringify(client, s);
873                 seq_printf(s, " %10zuK\n", K(commit->commit));
874                 allocations_stringify(client, s, false);
875                 seq_printf(s, "\n");
876                 total += commit->commit;
877         }
878         seq_printf(s, "%-18s %-18s %8s %10uK\n", "total", "", "", K(total));
879         spin_unlock(&node->clients_lock);
880         return 0;
881 }
882
883 DEBUGFS_OPEN_FOPS(allocations);
884
885 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
886 {
887         struct nvmap_carveout_node *node = s->private;
888         struct nvmap_carveout_commit *commit;
889         unsigned int total = 0;
890
891         spin_lock(&node->clients_lock);
892         seq_printf(s, "%-18s %18s %8s %11s\n",
893                 "CLIENT", "PROCESS", "PID", "SIZE");
894         list_for_each_entry(commit, &node->clients, list) {
895                 struct nvmap_client *client =
896                         get_client_from_carveout_commit(node, commit);
897                 client_stringify(client, s);
898                 seq_printf(s, " %10zuK\n", K(commit->commit));
899                 total += commit->commit;
900         }
901         seq_printf(s, "%-18s %18s %8s %10uK\n", "total", "", "", K(total));
902         spin_unlock(&node->clients_lock);
903         return 0;
904 }
905
906 DEBUGFS_OPEN_FOPS(clients);
907
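/*
 * Walk the device-wide handle tree and sum up iovmm (page-allocated)
 * handles. When the caller asks for a PSS split, a page whose mapcount is
 * zero (not mapped into any process) counts as non-PSS; pss = total - non_pss.
 */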
908 static void nvmap_iovmm_get_total_mss(u64 *pss, u64 *non_pss, u64 *total)
909 {
910         int i;
911         struct rb_node *n;
912         struct nvmap_device *dev = nvmap_dev;
913
914         *total = 0;
915         if (pss)
916                 *pss = 0;
917         if (non_pss)
918                 *non_pss = 0;
919         if (!dev)
920                 return;
921         spin_lock(&dev->handle_lock);
922         n = rb_first(&dev->handles);
923         for (; n != NULL; n = rb_next(n)) {
924                 struct nvmap_handle *h =
925                         rb_entry(n, struct nvmap_handle, node);
926
927                 if (!h || !h->alloc || !h->heap_pgalloc)
928                         continue;
929                 if (!non_pss) {
930                         *total += h->size;
931                         continue;
932                 }
933
934                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
935                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
936                         int mapcount = page_mapcount(page);
937                         if (!mapcount)
938                                 *non_pss += PAGE_SIZE;
939                         *total += PAGE_SIZE;
940                 }
941         }
942         if (pss && non_pss)
943                 *pss = *total - *non_pss;
944         spin_unlock(&dev->handle_lock);
945 }
946
947 #define PRINT_MEM_STATS_NOTE(x) \
948 do { \
949         seq_printf(s, "Note: total memory is a precise account of pages " \
950                 "allocated by NvMap.\nIt doesn't match the sum of all " \
951                 "clients' \"%s\" because shared memory\nis accounted in " \
952                 "full in the \"%s\" of each client that shares it.\n", #x, #x); \
953 } while (0)
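/*
 * Illustrative example (not from the code): if two clients map the same
 * 4 MB iovmm buffer, it contributes 4 MB to each client's TOTAL column in
 * the procrank output, but only 4 MB, counted once, to the device-wide
 * total printed on the last line, so the per-client columns need not add
 * up to the total.
 */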
954
955 static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
956 {
957         u64 total;
958         struct nvmap_client *client;
959         struct nvmap_device *dev = s->private;
960
961         spin_lock(&dev->clients_lock);
962         seq_printf(s, "%-18s %18s %8s %11s\n",
963                 "CLIENT", "PROCESS", "PID", "SIZE");
964         list_for_each_entry(client, &dev->clients, list) {
965                 u64 client_total;
966                 client_stringify(client, s);
967                 nvmap_get_client_mss(client, &client_total, true);
968                 seq_printf(s, " %10lluK\n", K(client_total));
969         }
970         spin_unlock(&dev->clients_lock);
971         nvmap_iovmm_get_total_mss(NULL, NULL, &total);
972         seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
973         return 0;
974 }
975
976 DEBUGFS_OPEN_FOPS(iovmm_clients);
977
978 static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
979 {
980         u64 total;
981         struct nvmap_client *client;
982         struct nvmap_device *dev = s->private;
983
984         spin_lock(&dev->clients_lock);
985         seq_printf(s, "%-18s %18s %8s %11s\n",
986                 "CLIENT", "PROCESS", "PID", "SIZE");
987         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %8s\n",
988                         "", "", "BASE", "SIZE", "FLAGS", "REFS",
989                         "DUPES", "PINS", "SHARE", "UID");
990         list_for_each_entry(client, &dev->clients, list) {
991                 u64 client_total;
992                 client_stringify(client, s);
993                 nvmap_get_client_mss(client, &client_total, true);
994                 seq_printf(s, " %10lluK\n", K(client_total));
995                 allocations_stringify(client, s, true);
996                 seq_printf(s, "\n");
997         }
998         spin_unlock(&dev->clients_lock);
999         nvmap_iovmm_get_total_mss(NULL, NULL, &total);
1000         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
1001         return 0;
1002 }
1003
1004 DEBUGFS_OPEN_FOPS(iovmm_allocations);
1005
1006 static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
1007                                    u64 *non_pss, u64 *total)
1008 {
1009         int i;
1010         struct rb_node *n;
1011
1012         *pss = *non_pss = *total = 0;
1013         nvmap_ref_lock(client);
1014         n = rb_first(&client->handle_refs);
1015         for (; n != NULL; n = rb_next(n)) {
1016                 struct nvmap_handle_ref *ref =
1017                         rb_entry(n, struct nvmap_handle_ref, node);
1018                 struct nvmap_handle *h = ref->handle;
1019
1020                 if (!h || !h->alloc || !h->heap_pgalloc)
1021                         continue;
1022
1023                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
1024                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
1025                         int mapcount = page_mapcount(page);
1026                         if (!mapcount)
1027                                 *non_pss += PAGE_SIZE;
1028                         *total += PAGE_SIZE;
1029                 }
1030                 *pss = *total - *non_pss;
1031         }
1032         nvmap_ref_unlock(client);
1033 }
1034
1035 static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
1036 {
1037         u64 pss, non_pss, total;
1038         struct nvmap_client *client;
1039         struct nvmap_device *dev = s->private;
1040         u64 total_memory, total_pss, total_non_pss;
1041
1042         spin_lock(&dev->clients_lock);
1043         seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
1044                 "CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
1045         list_for_each_entry(client, &dev->clients, list) {
1046                 client_stringify(client, s);
1047                 nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
1048                 seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
1049                         K(non_pss), K(total));
1050         }
1051         spin_unlock(&dev->clients_lock);
1052
1053         nvmap_iovmm_get_total_mss(&total_pss, &total_non_pss, &total_memory);
1054         seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
1055                 "total", "", "", K(total_pss),
1056                 K(total_non_pss), K(total_memory));
1057         PRINT_MEM_STATS_NOTE(TOTAL);
1058         return 0;
1059 }
1060
1061 DEBUGFS_OPEN_FOPS(iovmm_procrank);
1062
1063 ulong nvmap_iovmm_get_used_pages(void)
1064 {
1065         u64 total;
1066
1067         nvmap_iovmm_get_total_mss(NULL, NULL, &total);
1068         return total >> PAGE_SHIFT;
1069 }
1070
1071 static int nvmap_stats_reset(void *data, u64 val)
1072 {
1073         int i;
1074
1075         if (val) {
1076                 atomic64_set(&nvmap_stats.collect, 0);
1077                 for (i = 0; i < NS_NUM; i++) {
1078                         if (i == NS_TOTAL)
1079                                 continue;
1080                         atomic64_set(&nvmap_stats.stats[i], 0);
1081                 }
1082         }
1083         return 0;
1084 }
1085
1086 static int nvmap_stats_get(void *data, u64 *val)
1087 {
1088         atomic64_t *ptr = data;
1089
1090         *val = atomic64_read(ptr);
1091         return 0;
1092 }
1093
1094 static int nvmap_stats_set(void *data, u64 val)
1095 {
1096         atomic64_t *ptr = data;
1097
1098         atomic64_set(ptr, val);
1099         return 0;
1100 }
1101
1102 DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
1103 DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");
1104
1105 static void nvmap_stats_init(struct dentry *nvmap_debug_root)
1106 {
1107         struct dentry *stats_root;
1108
1109 #define CREATE_DF(x, y) \
1110         debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops);
1111
1112         stats_root = debugfs_create_dir("stats", nvmap_debug_root);
1113         if (!IS_ERR_OR_NULL(stats_root)) {
1114                 CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
1115                 CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
1116                 CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
1117                 CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
1118                 CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
1119                 CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
1120                 CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
1121                 CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
1122                 CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
1123                 CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
1124                 CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
1125                 CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
1126                 CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);
1127
1128                 debugfs_create_file("collect", S_IRUGO | S_IWUSR,
1129                         stats_root, &nvmap_stats.collect, &stats_fops);
1130                 debugfs_create_file("reset", S_IWUSR,
1131                         stats_root, NULL, &reset_stats_fops);
1132         }
1133
1134 #undef CREATE_DF
1135 }
1136
1137 void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
1138 {
1139         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1140                 atomic64_add(size, &nvmap_stats.stats[stat]);
1141 }
1142
1143 void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
1144 {
1145         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1146                 atomic64_sub(size, &nvmap_stats.stats[stat]);
1147 }
1148
1149 u64 nvmap_stats_read(enum nvmap_stats_t stat)
1150 {
1151         return atomic64_read(&nvmap_stats.stats[stat]);
1152 }
1153
1154 static int nvmap_probe(struct platform_device *pdev)
1155 {
1156         struct nvmap_platform_data *plat = pdev->dev.platform_data;
1157         struct nvmap_device *dev;
1158         struct dentry *nvmap_debug_root;
1159         unsigned int i;
1160         int e;
1161
1162         if (!plat) {
1163                 dev_err(&pdev->dev, "no platform data?\n");
1164                 return -ENODEV;
1165         }
1166
1167         /*
1168          * The DMA mapping API uses these parameters to decide how to map the
1169          * passed buffers. If the maximum physical segment size is set to
1170          * smaller than the size of the buffer, then the buffer will be mapped
1171          * as separate IO virtual address ranges.
1172          */
1173         pdev->dev.dma_parms = &nvmap_dma_parameters;
1174
1175         if (WARN_ON(nvmap_dev != NULL)) {
1176                 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1177                 return -ENODEV;
1178         }
1179
1180         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1181         if (!dev) {
1182                 dev_err(&pdev->dev, "out of memory for device\n");
1183                 return -ENOMEM;
1184         }
1185
1186         nvmap_dev = dev;
1187
1188         dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1189         dev->dev_user.name = "nvmap";
1190         dev->dev_user.fops = &nvmap_user_fops;
1191         dev->dev_user.parent = &pdev->dev;
1192
1193         dev->handles = RB_ROOT;
1194
1195 #ifdef CONFIG_NVMAP_PAGE_POOLS
1196         e = nvmap_page_pool_init(dev);
1197         if (e)
1198                 goto fail;
1199 #endif
1200
1201         spin_lock_init(&dev->handle_lock);
1202         INIT_LIST_HEAD(&dev->clients);
1203         spin_lock_init(&dev->clients_lock);
1204
1205         e = misc_register(&dev->dev_user);
1206         if (e) {
1207                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1208                         dev->dev_user.name);
1209                 goto fail;
1210         }
1211
1212         dev->nr_carveouts = 0;
1213         dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1214                              plat->nr_carveouts, GFP_KERNEL);
1215         if (!dev->heaps) {
1216                 e = -ENOMEM;
1217                 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1218                 goto fail;
1219         }
1220
1221         nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1222         if (IS_ERR_OR_NULL(nvmap_debug_root))
1223                 dev_err(&pdev->dev, "couldn't create debug files\n");
1224
1225         debugfs_create_u32("max_handle_count", S_IRUGO,
1226                         nvmap_debug_root, &nvmap_max_handle_count);
1227
1228         for (i = 0; i < plat->nr_carveouts; i++) {
1229                 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1230                 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1231                 node->base = round_up(co->base, PAGE_SIZE);
1232                 node->size = round_down(co->size -
1233                                         (node->base - co->base), PAGE_SIZE);
1234                 if (!co->size)
1235                         continue;
1236
1237                 node->carveout = nvmap_heap_create(
1238                                 dev->dev_user.this_device, co,
1239                                 node->base, node->size, node);
1240
1241                 if (!node->carveout) {
1242                         e = -ENOMEM;
1243                         dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1244                         goto fail_heaps;
1245                 }
1246                 node->index = dev->nr_carveouts;
1247                 dev->nr_carveouts++;
1248                 spin_lock_init(&node->clients_lock);
1249                 INIT_LIST_HEAD(&node->clients);
1250                 node->heap_bit = co->usage_mask;
1251
1252                 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1253                         struct dentry *heap_root =
1254                                 debugfs_create_dir(co->name, nvmap_debug_root);
1255                         if (!IS_ERR_OR_NULL(heap_root)) {
1256                                 debugfs_create_file("clients", S_IRUGO,
1257                                         heap_root, node, &debug_clients_fops);
1258                                 debugfs_create_file("allocations", S_IRUGO,
1259                                         heap_root, node,
1260                                         &debug_allocations_fops);
1261                                 nvmap_heap_debugfs_init(heap_root,
1262                                                         node->carveout);
1263                         }
1264                 }
1265         }
1266         if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1267                 struct dentry *iovmm_root =
1268                         debugfs_create_dir("iovmm", nvmap_debug_root);
1269                 if (!IS_ERR_OR_NULL(iovmm_root)) {
1270                         debugfs_create_file("clients", S_IRUGO, iovmm_root,
1271                                 dev, &debug_iovmm_clients_fops);
1272                         debugfs_create_file("allocations", S_IRUGO, iovmm_root,
1273                                 dev, &debug_iovmm_allocations_fops);
1274                         debugfs_create_file("procrank", S_IRUGO, iovmm_root,
1275                                 dev, &debug_iovmm_procrank_fops);
1276                 }
1277 #ifdef CONFIG_NVMAP_PAGE_POOLS
1278                 nvmap_page_pool_debugfs_init(nvmap_debug_root);
1279 #endif
1280 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
1281                 debugfs_create_size_t("cache_maint_inner_threshold",
1282                                       S_IRUSR | S_IWUSR,
1283                                       nvmap_debug_root,
1284                                       &cache_maint_inner_threshold);
1285
1286                 /* cortex-a9 */
1287                 if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
1288                         cache_maint_inner_threshold = SZ_32K;
1289                 pr_info("nvmap: inner cache maint threshold=%zd\n",
1290                         cache_maint_inner_threshold);
1291 #endif
1292 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
1293                 debugfs_create_size_t("cache_maint_outer_threshold",
1294                                       S_IRUSR | S_IWUSR,
1295                                       nvmap_debug_root,
1296                                       &cache_maint_outer_threshold);
1297                 pr_info("nvmap: outer cache maint threshold=%zd\n",
1298                         cache_maint_outer_threshold);
1299 #endif
1300         }
1301
1302         nvmap_stats_init(nvmap_debug_root);
1303         platform_set_drvdata(pdev, dev);
1304
1305         nvmap_dmabuf_debugfs_init(nvmap_debug_root);
1306         e = nvmap_dmabuf_stash_init();
1307         if (e)
1308                 goto fail_heaps;
1309
1310         return 0;
1311 fail_heaps:
1312         for (i = 0; i < dev->nr_carveouts; i++) {
1313                 struct nvmap_carveout_node *node = &dev->heaps[i];
1314                 nvmap_heap_destroy(node->carveout);
1315         }
1316 fail:
1317 #ifdef CONFIG_NVMAP_PAGE_POOLS
1318         nvmap_page_pool_fini(nvmap_dev);
1319 #endif
1320         kfree(dev->heaps);
1321         if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1322                 misc_deregister(&dev->dev_user);
1323         kfree(dev);
1324         nvmap_dev = NULL;
1325         return e;
1326 }
1327
1328 static int nvmap_remove(struct platform_device *pdev)
1329 {
1330         struct nvmap_device *dev = platform_get_drvdata(pdev);
1331         struct rb_node *n;
1332         struct nvmap_handle *h;
1333         int i;
1334
1335         misc_deregister(&dev->dev_user);
1336
1337         while ((n = rb_first(&dev->handles))) {
1338                 h = rb_entry(n, struct nvmap_handle, node);
1339                 rb_erase(&h->node, &dev->handles);
1340                 kfree(h);
1341         }
1342
1343         for (i = 0; i < dev->nr_carveouts; i++) {
1344                 struct nvmap_carveout_node *node = &dev->heaps[i];
1345                 nvmap_heap_destroy(node->carveout);
1346         }
1347         kfree(dev->heaps);
1348
1349         kfree(dev);
1350         nvmap_dev = NULL;
1351         return 0;
1352 }
1353
1354 static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1355 {
1356         return 0;
1357 }
1358
1359 static int nvmap_resume(struct platform_device *pdev)
1360 {
1361         return 0;
1362 }
1363
1364 static struct platform_driver nvmap_driver = {
1365         .probe          = nvmap_probe,
1366         .remove         = nvmap_remove,
1367         .suspend        = nvmap_suspend,
1368         .resume         = nvmap_resume,
1369
1370         .driver = {
1371                 .name   = "tegra-nvmap",
1372                 .owner  = THIS_MODULE,
1373         },
1374 };
1375
1376 static int __init nvmap_init_driver(void)
1377 {
1378         int e;
1379
1380         nvmap_dev = NULL;
1381
1382         e = nvmap_heap_init();
1383         if (e)
1384                 goto fail;
1385
1386         e = platform_driver_register(&nvmap_driver);
1387         if (e) {
1388                 nvmap_heap_deinit();
1389                 goto fail;
1390         }
1391
1392 fail:
1393         return e;
1394 }
1395 fs_initcall(nvmap_init_driver);
1396
1397 static void __exit nvmap_exit_driver(void)
1398 {
1399         platform_driver_unregister(&nvmap_driver);
1400         nvmap_heap_deinit();
1401         nvmap_dev = NULL;
1402 }
1403 module_exit(nvmap_exit_driver);