1 /*
2  * drivers/video/tegra/nvmap/nvmap_dev.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2012, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/backing-dev.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/kernel.h>
28 #include <linux/miscdevice.h>
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/oom.h>
32 #include <linux/platform_device.h>
33 #include <linux/seq_file.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/uaccess.h>
37 #include <linux/vmalloc.h>
38 #include <linux/nvmap.h>
39
40 #include <asm/cacheflush.h>
41 #include <asm/tlbflush.h>
42
43 #include <mach/iovmm.h>
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/nvmap.h>
47
48 #include "nvmap.h"
49 #include "nvmap_ioctl.h"
50 #include "nvmap_mru.h"
51 #include "nvmap_common.h"
52
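/* number of kernel PTE slots reserved for temporary mappings of physical
 * pages (cache maintenance, handle read/write), and how long the carveout
 * killer keeps retrying an allocation before giving up */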
53 #define NVMAP_NUM_PTES          64
54 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
55
56 #ifdef CONFIG_NVMAP_CARVEOUT_KILLER
57 static bool carveout_killer = true;
58 #else
59 static bool carveout_killer;
60 #endif
61 module_param(carveout_killer, bool, 0640);
62
63 struct nvmap_carveout_node {
64         unsigned int            heap_bit;
65         struct nvmap_heap       *carveout;
66         int                     index;
67         struct list_head        clients;
68         spinlock_t              clients_lock;
69 };
70
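/* per-device state: a reserved kernel VM region backed by NVMAP_NUM_PTES
 * individually managed PTEs (allocation tracked in ptebits), the global
 * rb-tree of all handles keyed by handle pointer, the /dev/nvmap and
 * /dev/knvmap misc devices, the registered carveout heaps, the shared
 * IOVMM state, and the list of all clients used by the debugfs views */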
71 struct nvmap_device {
72         struct vm_struct *vm_rgn;
73         pte_t           *ptes[NVMAP_NUM_PTES];
74         unsigned long   ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
75         unsigned int    lastpte;
76         spinlock_t      ptelock;
77
78         struct rb_root  handles;
79         spinlock_t      handle_lock;
80         wait_queue_head_t pte_wait;
81         struct miscdevice dev_super;
82         struct miscdevice dev_user;
83         struct nvmap_carveout_node *heaps;
84         int nr_carveouts;
85         struct nvmap_share iovmm_master;
86         struct list_head clients;
87         spinlock_t      clients_lock;
88 };
89
90 struct nvmap_device *nvmap_dev;
91
92 static struct backing_dev_info nvmap_bdi = {
93         .ra_pages       = 0,
94         .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
95                            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
96 };
97
98 static int nvmap_open(struct inode *inode, struct file *filp);
99 static int nvmap_release(struct inode *inode, struct file *filp);
100 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
101 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
102 static void nvmap_vma_open(struct vm_area_struct *vma);
103 static void nvmap_vma_close(struct vm_area_struct *vma);
104 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
105
106 static const struct file_operations nvmap_user_fops = {
107         .owner          = THIS_MODULE,
108         .open           = nvmap_open,
109         .release        = nvmap_release,
110         .unlocked_ioctl = nvmap_ioctl,
111         .mmap           = nvmap_map,
112 };
113
114 static const struct file_operations nvmap_super_fops = {
115         .owner          = THIS_MODULE,
116         .open           = nvmap_open,
117         .release        = nvmap_release,
118         .unlocked_ioctl = nvmap_ioctl,
119         .mmap           = nvmap_map,
120 };
121
122 static struct vm_operations_struct nvmap_vma_ops = {
123         .open           = nvmap_vma_open,
124         .close          = nvmap_vma_close,
125         .fault          = nvmap_vma_fault,
126 };
127
128 int is_nvmap_vma(struct vm_area_struct *vma)
129 {
130         return vma->vm_ops == &nvmap_vma_ops;
131 }
132
133 struct device *nvmap_client_to_device(struct nvmap_client *client)
134 {
135         if (client->super)
136                 return client->dev->dev_super.this_device;
137         else
138                 return client->dev->dev_user.this_device;
139 }
140
141 struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
142 {
143         return &dev->iovmm_master;
144 }
145
146 /* allocates a PTE slot for the caller's use; returns a pointer to the PTE
147  * or an ERR_PTR-encoded errno. may be called from IRQ context */
148 pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
149 {
150         unsigned long flags;
151         unsigned long bit;
152
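        /* next-fit search: look for a free slot from the last allocated
         * position to the end of the bitmap, then wrap around and search the
         * beginning; if both passes fail, every PTE slot is in use */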
153         spin_lock_irqsave(&dev->ptelock, flags);
154         bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
155         if (bit == NVMAP_NUM_PTES) {
156                 bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
157                 if (bit == dev->lastpte)
158                         bit = NVMAP_NUM_PTES;
159         }
160
161         if (bit == NVMAP_NUM_PTES) {
162                 spin_unlock_irqrestore(&dev->ptelock, flags);
163                 return ERR_PTR(-ENOMEM);
164         }
165
166         dev->lastpte = bit;
167         set_bit(bit, dev->ptebits);
168         spin_unlock_irqrestore(&dev->ptelock, flags);
169
170         *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
171         return &(dev->ptes[bit]);
172 }
173
174 /* allocates a PTE slot, sleeping until one becomes free; returns a pointer
175  * to the PTE or an ERR_PTR-encoded errno. must be called from sleepable context */
176 pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
177 {
178         int ret;
179         pte_t **pte;
180         ret = wait_event_interruptible(dev->pte_wait,
181                         !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
182
183         if (ret == -ERESTARTSYS)
184                 return ERR_PTR(-EINTR);
185
186         return pte;
187 }
188
189 /* frees a PTE */
190 void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
191 {
192         unsigned long addr;
193         unsigned int bit = pte - dev->ptes;
194         unsigned long flags;
195
196         if (WARN_ON(bit >= NVMAP_NUM_PTES))
197                 return;
198
199         addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
200         set_pte_at(&init_mm, addr, *pte, 0);
201
202         spin_lock_irqsave(&dev->ptelock, flags);
203         clear_bit(bit, dev->ptebits);
204         spin_unlock_irqrestore(&dev->ptelock, flags);
205         wake_up(&dev->pte_wait);
206 }
207
208 /* get pte for the virtual address */
209 pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr)
210 {
211         unsigned int bit;
212
213         BUG_ON(vaddr < (unsigned long)dev->vm_rgn->addr);
214         bit = (vaddr - (unsigned long)dev->vm_rgn->addr) >> PAGE_SHIFT;
215         BUG_ON(bit >= NVMAP_NUM_PTES);
216         return &(dev->ptes[bit]);
217 }
218
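/*
 * Typical use of the PTE pool (nvmap_flush_heap_block() below is a real
 * caller): allocate a slot, point it at a physical page with set_pte_at(),
 * flush that kernel TLB entry, access the page through the returned virtual
 * address, then release the slot with nvmap_free_pte().
 */
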
219 /* verifies that "id" names a valid handle ref owned by this client. the
220  * caller must hold the client's ref_lock prior to calling this function */
221 struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
222                                                    unsigned long id)
223 {
224         struct rb_node *n = c->handle_refs.rb_node;
225
226         while (n) {
227                 struct nvmap_handle_ref *ref;
228                 ref = rb_entry(n, struct nvmap_handle_ref, node);
229                 if ((unsigned long)ref->handle == id)
230                         return ref;
231                 else if (id > (unsigned long)ref->handle)
232                         n = n->rb_right;
233                 else
234                         n = n->rb_left;
235         }
236
237         return NULL;
238 }
239
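/* looks up "id" in the client's ref tree and, if it is valid for this client,
 * takes a reference on the underlying handle; returns NULL otherwise */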
240 struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
241                                          unsigned long id)
242 {
243         struct nvmap_handle_ref *ref;
244         struct nvmap_handle *h = NULL;
245
246         nvmap_ref_lock(client);
247         ref = _nvmap_validate_id_locked(client, id);
248         if (ref)
249                 h = ref->handle;
250         if (h)
251                 h = nvmap_handle_get(h);
252         nvmap_ref_unlock(client);
253         return h;
254 }
255
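/* returns the heap bit of the carveout heap that contains block "b", or 0 if
 * the block does not belong to any carveout registered with this device */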
256 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
257                                    struct nvmap_heap_block *b)
258 {
259         struct nvmap_heap *h = nvmap_block_to_heap(b);
260         struct nvmap_carveout_node *n;
261         int i;
262
263         for (i = 0; i < c->dev->nr_carveouts; i++) {
264                 n = &c->dev->heaps[i];
265                 if (n->carveout == h)
266                         return n->heap_bit;
267         }
268         return 0;
269 }
270
271 /*
272  * This routine flushes carveout memory from the cache.
273  * Why is a cache flush needed for carveout memory? Consider the case where a
274  * piece of carveout is allocated as cached and then released. If the same
275  * memory is later allocated to satisfy an uncached request without being
276  * flushed from the cache, the client might pass it to a H/W engine, which
277  * could start modifying it. Because the memory was cached earlier, portions
278  * of it may still be resident in the cache. A later CPU access to unrelated
279  * memory can write those stale cache lines back to main memory and corrupt
280  * the buffer if the write-back happens after the H/W engine has written its
281  * data.
282  *
283  * However, blindly flushing the memory on every carveout allocation is redundant.
284  *
285  * To optimize carveout buffer cache flushes, the following strategy is used:
286  *
287  * - The whole carveout is flushed from the cache during its initialization.
288  * - During allocation, carveout buffers are not flushed from the cache.
289  * - During deallocation, carveout buffers are flushed only if they were
290  *   allocated as cached; uncached/write-combined buffers need no cache flush.
291  *   Draining the store buffers is enough in that case.
292  */
293 int nvmap_flush_heap_block(struct nvmap_client *client,
294         struct nvmap_heap_block *block, size_t len, unsigned int prot)
295 {
296         pte_t **pte;
297         void *addr;
298         unsigned long kaddr;
299         phys_addr_t phys = block->base;
300         phys_addr_t end = block->base + len;
301
302         if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
303                 goto out;
304
305         if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD_INNER) {
306                 inner_flush_cache_all();
307                 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
308                         outer_flush_range(block->base, block->base + len);
309                 goto out;
310         }
311
312         pte = nvmap_alloc_pte((client ? client->dev : nvmap_dev), &addr);
313         if (IS_ERR(pte))
314                 return PTR_ERR(pte);
315
316         kaddr = (unsigned long)addr;
317
318         while (phys < end) {
319                 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
320                 unsigned long pfn = __phys_to_pfn(phys);
321                 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
322
323                 next = min(next, end);
324                 set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
325                 flush_tlb_kernel_page(kaddr);
326                 __cpuc_flush_dcache_area(base, next - phys);
327                 phys = next;
328         }
329
330         if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
331                 outer_flush_range(block->base, block->base + len);
332
333         nvmap_free_pte((client ? client->dev : nvmap_dev), pte);
334 out:
335         wmb();
336         return 0;
337 }
338
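/* per-client, per-carveout accounting of committed bytes; the totals feed the
 * debugfs "clients"/"allocations" views and the carveout killer's victim
 * selection */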
339 void nvmap_carveout_commit_add(struct nvmap_client *client,
340                                struct nvmap_carveout_node *node,
341                                size_t len)
342 {
343         unsigned long flags;
344
345         nvmap_ref_lock(client);
346         spin_lock_irqsave(&node->clients_lock, flags);
347         BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
348                client->carveout_commit[node->index].commit != 0);
349
350         client->carveout_commit[node->index].commit += len;
351         /* if this client isn't already on this heap's list of clients,
352            add it */
353         if (list_empty(&client->carveout_commit[node->index].list)) {
354                 list_add(&client->carveout_commit[node->index].list,
355                          &node->clients);
356         }
357         spin_unlock_irqrestore(&node->clients_lock, flags);
358         nvmap_ref_unlock(client);
359 }
360
361 void nvmap_carveout_commit_subtract(struct nvmap_client *client,
362                                     struct nvmap_carveout_node *node,
363                                     size_t len)
364 {
365         unsigned long flags;
366
367         if (!client)
368                 return;
369
370         spin_lock_irqsave(&node->clients_lock, flags);
371         BUG_ON(client->carveout_commit[node->index].commit < len);
372         client->carveout_commit[node->index].commit -= len;
373         /* if no more allocation in this carveout for this node, delete it */
374         if (!client->carveout_commit[node->index].commit)
375                 list_del_init(&client->carveout_commit[node->index].list);
376         spin_unlock_irqrestore(&node->clients_lock, flags);
377 }
378
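/* the carveout_commit array is allocated as a trailing array at the end of
 * struct nvmap_client (see nvmap_create_client()), so element zero sits at a
 * fixed offset from the client. walking back node->index entries and then
 * subtracting offsetof() recovers the owning client from a commit entry,
 * container_of-style. */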
379 static struct nvmap_client *get_client_from_carveout_commit(
380         struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
381 {
382         struct nvmap_carveout_commit *first_commit = commit - node->index;
383         return (void *)first_commit - offsetof(struct nvmap_client,
384                                                carveout_commit);
385 }
386
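/* allocators blocked in nvmap_carveout_alloc() sleep on wait_reclaim;
 * destroy_client() bumps wait_count and wakes them whenever a client (and
 * with it, its carveout allocations) goes away */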
387 static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
388 static int wait_count;
389 bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
390 {
391         struct nvmap_carveout_commit *commit;
392         size_t selected_size = 0;
393         int selected_oom_adj = OOM_ADJUST_MIN;
394         struct task_struct *selected_task = NULL;
395         unsigned long flags;
396         bool wait = false;
397         int current_oom_adj = OOM_ADJUST_MIN;
398
399         task_lock(current);
400         if (current->signal)
401                 current_oom_adj = current->signal->oom_adj;
402         task_unlock(current);
403
404         spin_lock_irqsave(&node->clients_lock, flags);
405         /* find the task with the largest oom_adj (lowest priority)
406          * and largest carveout allocation -- ignore kernel allocations,
407          * there's no way to handle them */
408         list_for_each_entry(commit, &node->clients, list) {
409                 struct nvmap_client *client =
410                         get_client_from_carveout_commit(node, commit);
411                 size_t size = commit->commit;
412                 struct task_struct *task = client->task;
413                 struct signal_struct *sig;
414
415                 if (!task)
416                         continue;
417
418                 task_lock(task);
419                 sig = task->signal;
420                 if (!task->mm || !sig)
421                         goto end;
422                 /* don't try to kill current */
423                 if (task == current->group_leader)
424                         goto end;
425                 /* don't try to kill higher priority tasks */
426                 if (sig->oom_adj < current_oom_adj)
427                         goto end;
428                 if (sig->oom_adj < selected_oom_adj)
429                         goto end;
430                 if (sig->oom_adj == selected_oom_adj &&
431                     size <= selected_size)
432                         goto end;
433                 selected_oom_adj = sig->oom_adj;
434                 selected_size = size;
435                 selected_task = task;
436 end:
437                 task_unlock(task);
438         }
439         if (selected_task) {
440                 wait = true;
441                 if (fatal_signal_pending(selected_task)) {
442                         pr_warning("carveout_killer: process %d dying "
443                                    "slowly\n", selected_task->pid);
444                         goto out;
445                 }
446                 pr_info("carveout_killer: killing process %d with oom_adj %d "
447                         "to reclaim %zu (for process with oom_adj %d)\n",
448                         selected_task->pid, selected_oom_adj,
449                         selected_size, current_oom_adj);
450                 force_sig(SIGKILL, selected_task);
451         }
452 out:
453         spin_unlock_irqrestore(&node->clients_lock, flags);
454         return wait;
455 }
456
457 static
458 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
459                                               struct nvmap_handle *handle,
460                                               unsigned long type)
461 {
462         struct nvmap_carveout_node *co_heap;
463         struct nvmap_device *dev = client->dev;
464         int i;
465
466         for (i = 0; i < dev->nr_carveouts; i++) {
467                 struct nvmap_heap_block *block;
468                 co_heap = &dev->heaps[i];
469
470                 if (!(co_heap->heap_bit & type))
471                         continue;
472
473                 block = nvmap_heap_alloc(co_heap->carveout, handle);
474                 if (block)
475                         return block;
476         }
477         return NULL;
478 }
479
480 static bool nvmap_carveout_freed(int count)
481 {
482         smp_rmb();
483         return count != wait_count;
484 }
485
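/* tries each carveout heap whose heap_bit matches "type". if the carveout
 * killer is enabled and the allocation fails, repeatedly picks a victim via
 * nvmap_shrink_carveout() and waits for memory to be reclaimed, retrying
 * until NVMAP_CARVEOUT_KILLER_RETRY_TIME has elapsed */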
486 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
487                                               struct nvmap_handle *handle,
488                                               unsigned long type)
489 {
490         struct nvmap_heap_block *block;
491         struct nvmap_carveout_node *co_heap;
492         struct nvmap_device *dev = client->dev;
493         int i;
494         unsigned long end = jiffies +
495                 msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
496         int count = 0;
497
498         do {
499                 block = do_nvmap_carveout_alloc(client, handle, type);
500                 if (!carveout_killer)
501                         return block;
502
503                 if (block)
504                         return block;
505
506                 if (!count++) {
507                         char task_comm[TASK_COMM_LEN];
508                         if (client->task)
509                                 get_task_comm(task_comm, client->task);
510                         else
511                                 task_comm[0] = 0;
512                         pr_info("%s: failed to allocate %u bytes for "
513                                 "process %s, firing carveout "
514                                 "killer!\n", __func__, handle->size, task_comm);
515
516                 } else {
517                         pr_info("%s: still can't allocate %u bytes, "
518                                 "attempt %d!\n", __func__, handle->size, count);
519                 }
520
521                 /* shrink carveouts that matter and try again */
522                 for (i = 0; i < dev->nr_carveouts; i++) {
523                         int count;
524                         co_heap = &dev->heaps[i];
525
526                         if (!(co_heap->heap_bit & type))
527                                 continue;
528
529                         count = wait_count;
530                         /* indicates we didn't find anything to kill,
531                            might as well stop trying */
532                         if (!nvmap_shrink_carveout(co_heap))
533                                 return NULL;
534
535                         if (time_is_after_jiffies(end))
536                                 wait_event_interruptible_timeout(wait_reclaim,
537                                          nvmap_carveout_freed(count),
538                                          end - jiffies);
539                 }
540         } while (time_is_after_jiffies(end));
541
542         if (time_is_before_jiffies(end))
543                 pr_info("carveout_killer: timeout expired without "
544                         "allocation succeeding.\n");
545
546         return NULL;
547 }
548
549 /* remove a handle from the device's tree of all handles; called
550  * when freeing handles. */
551 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
552 {
553         spin_lock(&dev->handle_lock);
554
555         /* re-test inside the spinlock if the handle really has no clients;
556          * only remove the handle if it is unreferenced */
557         if (atomic_add_return(0, &h->ref) > 0) {
558                 spin_unlock(&dev->handle_lock);
559                 return -EBUSY;
560         }
561         smp_rmb();
562         BUG_ON(atomic_read(&h->ref) < 0);
563         BUG_ON(atomic_read(&h->pin) != 0);
564
565         rb_erase(&h->node, &dev->handles);
566
567         spin_unlock(&dev->handle_lock);
568         return 0;
569 }
570
571 /* adds a newly-created handle to the device master tree */
572 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
573 {
574         struct rb_node **p;
575         struct rb_node *parent = NULL;
576
577         spin_lock(&dev->handle_lock);
578         p = &dev->handles.rb_node;
579         while (*p) {
580                 struct nvmap_handle *b;
581
582                 parent = *p;
583                 b = rb_entry(parent, struct nvmap_handle, node);
584                 if (h > b)
585                         p = &parent->rb_right;
586                 else
587                         p = &parent->rb_left;
588         }
589         rb_link_node(&h->node, parent, p);
590         rb_insert_color(&h->node, &dev->handles);
591         spin_unlock(&dev->handle_lock);
592 }
593
594 /* validates that a handle is in the device master tree, and that the
595  * client has permission to access it */
596 struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
597                                         unsigned long id)
598 {
599         struct nvmap_handle *h = NULL;
600         struct rb_node *n;
601
602         spin_lock(&client->dev->handle_lock);
603
604         n = client->dev->handles.rb_node;
605
606         while (n) {
607                 h = rb_entry(n, struct nvmap_handle, node);
608                 if ((unsigned long)h == id) {
609                         if (client->super || h->global || (h->owner == client))
610                                 h = nvmap_handle_get(h);
611                         else
612                                 h = NULL;
613                         spin_unlock(&client->dev->handle_lock);
614                         return h;
615                 }
616                 if (id > (unsigned long)h)
617                         n = n->rb_right;
618                 else
619                         n = n->rb_left;
620         }
621         spin_unlock(&client->dev->handle_lock);
622         return NULL;
623 }
624
625 struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
626                                          const char *name)
627 {
628         struct nvmap_client *client;
629         struct task_struct *task;
630         int i;
631
632         if (WARN_ON(!dev))
633                 return NULL;
634
635         client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
636                          * dev->nr_carveouts), GFP_KERNEL);
637         if (!client)
638                 return NULL;
639
640         client->name = name;
641         client->super = true;
642         client->dev = dev;
643         /* TODO: allocate unique IOVMM client for each nvmap client */
644         client->share = &dev->iovmm_master;
645         client->handle_refs = RB_ROOT;
646
647         atomic_set(&client->iovm_commit, 0);
648
649         client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
650
651         for (i = 0; i < dev->nr_carveouts; i++) {
652                 INIT_LIST_HEAD(&client->carveout_commit[i].list);
653                 client->carveout_commit[i].commit = 0;
654         }
655
656         get_task_struct(current->group_leader);
657         task_lock(current->group_leader);
658         /* don't bother to store task struct for kernel threads,
659            they can't be killed anyway */
660         if (current->flags & PF_KTHREAD) {
661                 put_task_struct(current->group_leader);
662                 task = NULL;
663         } else {
664                 task = current->group_leader;
665         }
666         task_unlock(current->group_leader);
667         client->task = task;
668
669         mutex_init(&client->ref_lock);
670         atomic_set(&client->count, 1);
671
672         spin_lock(&dev->clients_lock);
673         list_add(&client->list, &dev->clients);
674         spin_unlock(&dev->clients_lock);
675         return client;
676 }
677
678 static void destroy_client(struct nvmap_client *client)
679 {
680         struct rb_node *n;
681         int i;
682
683         if (!client)
684                 return;
685
686
687         while ((n = rb_first(&client->handle_refs))) {
688                 struct nvmap_handle_ref *ref;
689                 int pins, dupes;
690
691                 ref = rb_entry(n, struct nvmap_handle_ref, node);
692                 rb_erase(&ref->node, &client->handle_refs);
693
694                 smp_rmb();
695                 pins = atomic_read(&ref->pin);
696
697                 if (ref->handle->owner == client)
698                         ref->handle->owner = NULL;
699
700                 while (pins--)
701                         nvmap_unpin_handles(client, &ref->handle, 1);
702
703                 dupes = atomic_read(&ref->dupes);
704                 while (dupes--)
705                         nvmap_handle_put(ref->handle);
706
707                 kfree(ref);
708         }
709
710         if (carveout_killer) {
711                 wait_count++;
712                 smp_wmb();
713                 wake_up_all(&wait_reclaim);
714         }
715
716         for (i = 0; i < client->dev->nr_carveouts; i++)
717                 list_del(&client->carveout_commit[i].list);
718
719         if (client->task)
720                 put_task_struct(client->task);
721
722         spin_lock(&client->dev->clients_lock);
723         list_del(&client->list);
724         spin_unlock(&client->dev->clients_lock);
725         kfree(client);
726 }
727
728 struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
729 {
730         if (WARN_ON(!client))
731                 return NULL;
732
733         if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
734                 return NULL;
735
736         return client;
737 }
738
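/* converts an nvmap device file descriptor into a client pointer, taking a
 * reference on the client; returns ERR_PTR(-EINVAL) for a bad fd and
 * ERR_PTR(-EFAULT) if the file is not an nvmap device */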
739 struct nvmap_client *nvmap_client_get_file(int fd)
740 {
741         struct nvmap_client *client = ERR_PTR(-EFAULT);
742         struct file *f = fget(fd);
743         if (!f)
744                 return ERR_PTR(-EINVAL);
745
746         if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
747                 client = f->private_data;
748                 atomic_inc(&client->count);
749         }
750
751         fput(f);
752         return client;
753 }
754
755 void nvmap_client_put(struct nvmap_client *client)
756 {
757         if (!client)
758                 return;
759
760         if (!atomic_dec_return(&client->count))
761                 destroy_client(client);
762 }
763
764 static int nvmap_open(struct inode *inode, struct file *filp)
765 {
766         struct miscdevice *miscdev = filp->private_data;
767         struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
768         struct nvmap_client *priv;
769         int ret;
770
771         ret = nonseekable_open(inode, filp);
772         if (unlikely(ret))
773                 return ret;
774
775         BUG_ON(dev != nvmap_dev);
776         priv = nvmap_create_client(dev, "user");
777         if (!priv)
778                 return -ENOMEM;
779         trace_nvmap_open(priv);
780
781         priv->super = (filp->f_op == &nvmap_super_fops);
782
783         filp->f_mapping->backing_dev_info = &nvmap_bdi;
784
785         filp->private_data = priv;
786         return 0;
787 }
788
789 static int nvmap_release(struct inode *inode, struct file *filp)
790 {
791         trace_nvmap_release(filp->private_data);
792         nvmap_client_put(filp->private_data);
793         return 0;
794 }
795
796 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
797 {
798         struct nvmap_vma_priv *priv;
799
800         /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
801          * will be stored in vm_private_data and faulted in. until the
802          * ioctl is made, the VMA is mapped no-access */
803         vma->vm_private_data = NULL;
804
805         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
806         if (!priv)
807                 return -ENOMEM;
808
809         priv->offs = 0;
810         priv->handle = NULL;
811         atomic_set(&priv->count, 1);
812
813         vma->vm_flags |= VM_SHARED;
814         vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
815         vma->vm_ops = &nvmap_vma_ops;
816         vma->vm_private_data = priv;
817
818         return 0;
819 }
820
821 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
822 {
823         int err = 0;
824         void __user *uarg = (void __user *)arg;
825
826         if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
827                 return -ENOTTY;
828
829         if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
830                 return -ENOTTY;
831
832         if (_IOC_DIR(cmd) & _IOC_READ)
833                 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
834         if (_IOC_DIR(cmd) & _IOC_WRITE)
835                 err |= !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
836
837         if (err)
838                 return -EFAULT;
839
840         switch (cmd) {
841         case NVMAP_IOC_CLAIM:
842                 nvmap_warn(filp->private_data, "preserved handles not "
843                            "supported\n");
844                 err = -ENODEV;
845                 break;
846         case NVMAP_IOC_CREATE:
847         case NVMAP_IOC_FROM_ID:
848                 err = nvmap_ioctl_create(filp, cmd, uarg);
849                 break;
850
851         case NVMAP_IOC_GET_ID:
852                 err = nvmap_ioctl_getid(filp, uarg);
853                 break;
854
855         case NVMAP_IOC_PARAM:
856                 err = nvmap_ioctl_get_param(filp, uarg);
857                 break;
858
859         case NVMAP_IOC_UNPIN_MULT:
860         case NVMAP_IOC_PIN_MULT:
861                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
862                 break;
863
864         case NVMAP_IOC_ALLOC:
865                 err = nvmap_ioctl_alloc(filp, uarg);
866                 break;
867
868         case NVMAP_IOC_FREE:
869                 err = nvmap_ioctl_free(filp, arg);
870                 break;
871
872         case NVMAP_IOC_MMAP:
873                 err = nvmap_map_into_caller_ptr(filp, uarg);
874                 break;
875
876         case NVMAP_IOC_WRITE:
877         case NVMAP_IOC_READ:
878                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
879                 break;
880
881         case NVMAP_IOC_CACHE:
882                 err = nvmap_ioctl_cache_maint(filp, uarg);
883                 break;
884
885         case NVMAP_IOC_SHARE:
886                 err = nvmap_ioctl_share_dmabuf(filp, uarg);
887                 break;
888
889         default:
890                 return -ENOTTY;
891         }
892         return err;
893 }
894
895 /* to ensure that the backing store for the VMA isn't freed while a fork'd
896  * reference still exists, nvmap_vma_open increments the reference count on
897  * the handle, and nvmap_vma_close decrements it. alternatively, we could
898  * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
899  */
900 static void nvmap_vma_open(struct vm_area_struct *vma)
901 {
902         struct nvmap_vma_priv *priv;
903
904         priv = vma->vm_private_data;
905         BUG_ON(!priv);
906
907         atomic_inc(&priv->count);
908         if (priv->handle)
909                 nvmap_usecount_inc(priv->handle);
910 }
911
912 static void nvmap_vma_close(struct vm_area_struct *vma)
913 {
914         struct nvmap_vma_priv *priv = vma->vm_private_data;
915
916         if (priv) {
917                 if (priv->handle) {
918                         BUG_ON(priv->handle->usecount == 0);
919                         nvmap_usecount_dec(priv->handle);
920                 }
921                 if (!atomic_dec_return(&priv->count)) {
922                         if (priv->handle)
923                                 nvmap_handle_put(priv->handle);
924                         kfree(priv);
925                 }
926         }
927         vma->vm_private_data = NULL;
928 }
929
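/* faults in a single page of a mapped handle: for physically contiguous
 * carveout handles the PFN is computed from the carveout base and inserted
 * directly, for page-allocated (IOVMM) handles the backing struct page is
 * handed back to the core fault handler */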
930 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
931 {
932         struct nvmap_vma_priv *priv;
933         unsigned long offs;
934
935         offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
936         priv = vma->vm_private_data;
937         if (!priv || !priv->handle || !priv->handle->alloc)
938                 return VM_FAULT_SIGBUS;
939
940         offs += priv->offs;
941         /* if the VMA was split for some reason, vm_pgoff will be the VMA's
942          * offset from the original VMA */
943         offs += (vma->vm_pgoff << PAGE_SHIFT);
944
945         if (offs >= priv->handle->size)
946                 return VM_FAULT_SIGBUS;
947
948         if (!priv->handle->heap_pgalloc) {
949                 unsigned long pfn;
950                 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
951                 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
952                 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
953                 return VM_FAULT_NOPAGE;
954         } else {
955                 struct page *page;
956                 offs >>= PAGE_SHIFT;
957                 page = priv->handle->pgalloc.pages[offs];
958                 if (page)
959                         get_page(page);
960                 vmf->page = page;
961                 return (page) ? 0 : VM_FAULT_SIGBUS;
962         }
963 }
964
965 static ssize_t attr_show_usage(struct device *dev,
966                                struct device_attribute *attr, char *buf)
967 {
968         struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
969
970         return sprintf(buf, "%08x\n", node->heap_bit);
971 }
972
973 static struct device_attribute heap_attr_show_usage =
974         __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
975
976 static struct attribute *heap_extra_attrs[] = {
977         &heap_attr_show_usage.attr,
978         NULL,
979 };
980
981 static struct attribute_group heap_extra_attr_group = {
982         .attrs = heap_extra_attrs,
983 };
984
985 static void client_stringify(struct nvmap_client *client, struct seq_file *s)
986 {
987         char task_comm[TASK_COMM_LEN];
988         if (!client->task) {
989                 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
990                 return;
991         }
992         get_task_comm(task_comm, client->task);
993         seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
994                    client->task->pid);
995 }
996
997 static void allocations_stringify(struct nvmap_client *client,
998                                   struct seq_file *s, bool iovmm)
999 {
1000         struct rb_node *n = rb_first(&client->handle_refs);
1001
1002         for (; n != NULL; n = rb_next(n)) {
1003                 struct nvmap_handle_ref *ref =
1004                         rb_entry(n, struct nvmap_handle_ref, node);
1005                 struct nvmap_handle *handle = ref->handle;
1006                 if (handle->alloc && handle->heap_pgalloc == iovmm) {
1007                         unsigned long base = iovmm ? 0 :
1008                                 (unsigned long)(handle->carveout->base);
1009                         seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "",
1010                                         base, handle->size, handle->userflags);
1011                 }
1012         }
1013 }
1014
1015 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
1016 {
1017         struct nvmap_carveout_node *node = s->private;
1018         struct nvmap_carveout_commit *commit;
1019         unsigned long flags;
1020         unsigned int total = 0;
1021
1022         spin_lock_irqsave(&node->clients_lock, flags);
1023         seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
1024                 "SIZE", "FLAGS");
1025         seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
1026                                         "BASE", "SIZE");
1027         list_for_each_entry(commit, &node->clients, list) {
1028                 struct nvmap_client *client =
1029                         get_client_from_carveout_commit(node, commit);
1030                 client_stringify(client, s);
1031                 seq_printf(s, " %10u\n", commit->commit);
1032                 allocations_stringify(client, s, false);
1033                 seq_printf(s, "\n");
1034                 total += commit->commit;
1035         }
1036         seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
1037         spin_unlock_irqrestore(&node->clients_lock, flags);
1038
1039         return 0;
1040 }
1041
1042 static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
1043 {
1044         return single_open(file, nvmap_debug_allocations_show,
1045                            inode->i_private);
1046 }
1047
1048 static const struct file_operations debug_allocations_fops = {
1049         .open = nvmap_debug_allocations_open,
1050         .read = seq_read,
1051         .llseek = seq_lseek,
1052         .release = single_release,
1053 };
1054
1055 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
1056 {
1057         struct nvmap_carveout_node *node = s->private;
1058         struct nvmap_carveout_commit *commit;
1059         unsigned long flags;
1060         unsigned int total = 0;
1061
1062         spin_lock_irqsave(&node->clients_lock, flags);
1063         seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
1064                 "SIZE");
1065         list_for_each_entry(commit, &node->clients, list) {
1066                 struct nvmap_client *client =
1067                         get_client_from_carveout_commit(node, commit);
1068                 client_stringify(client, s);
1069                 seq_printf(s, " %10u\n", commit->commit);
1070                 total += commit->commit;
1071         }
1072         seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
1073         spin_unlock_irqrestore(&node->clients_lock, flags);
1074
1075         return 0;
1076 }
1077
1078 static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
1079 {
1080         return single_open(file, nvmap_debug_clients_show, inode->i_private);
1081 }
1082
1083 static const struct file_operations debug_clients_fops = {
1084         .open = nvmap_debug_clients_open,
1085         .read = seq_read,
1086         .llseek = seq_lseek,
1087         .release = single_release,
1088 };
1089
1090 static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
1091 {
1092         unsigned long flags;
1093         unsigned int total = 0;
1094         struct nvmap_client *client;
1095         struct nvmap_device *dev = s->private;
1096
1097         spin_lock_irqsave(&dev->clients_lock, flags);
1098         seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
1099                 "SIZE");
1100         list_for_each_entry(client, &dev->clients, list) {
1101                 client_stringify(client, s);
1102                 seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
1103                 total += atomic_read(&client->iovm_commit);
1104         }
1105         seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
1106         spin_unlock_irqrestore(&dev->clients_lock, flags);
1107
1108         return 0;
1109 }
1110
1111 static int nvmap_debug_iovmm_clients_open(struct inode *inode,
1112                                             struct file *file)
1113 {
1114         return single_open(file, nvmap_debug_iovmm_clients_show,
1115                             inode->i_private);
1116 }
1117
1118 static const struct file_operations debug_iovmm_clients_fops = {
1119         .open = nvmap_debug_iovmm_clients_open,
1120         .read = seq_read,
1121         .llseek = seq_lseek,
1122         .release = single_release,
1123 };
1124
1125 static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
1126 {
1127         unsigned long flags;
1128         unsigned int total = 0;
1129         struct nvmap_client *client;
1130         struct nvmap_device *dev = s->private;
1131
1132         spin_lock_irqsave(&dev->clients_lock, flags);
1133         seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
1134                 "SIZE", "FLAGS");
1135         seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
1136                                         "BASE", "SIZE");
1137         list_for_each_entry(client, &dev->clients, list) {
1138                 client_stringify(client, s);
1139                 seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
1140                 allocations_stringify(client, s, true);
1141                 seq_printf(s, "\n");
1142                 total += atomic_read(&client->iovm_commit);
1143         }
1144         seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
1145         spin_unlock_irqrestore(&dev->clients_lock, flags);
1146
1147         return 0;
1148 }
1149
1150 static int nvmap_debug_iovmm_allocations_open(struct inode *inode,
1151                                                 struct file *file)
1152 {
1153         return single_open(file, nvmap_debug_iovmm_allocations_show,
1154                             inode->i_private);
1155 }
1156
1157 static const struct file_operations debug_iovmm_allocations_fops = {
1158         .open = nvmap_debug_iovmm_allocations_open,
1159         .read = seq_read,
1160         .llseek = seq_lseek,
1161         .release = single_release,
1162 };
1163
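/* probe: registers the /dev/nvmap and /dev/knvmap misc devices, reserves an
 * NVMAP_NUM_PTES-page kernel VM area (pre-allocating its page tables), creates
 * one nvmap_heap per non-empty platform carveout, and exposes per-heap and
 * IOVMM statistics through debugfs */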
1164 static int nvmap_probe(struct platform_device *pdev)
1165 {
1166         struct nvmap_platform_data *plat = pdev->dev.platform_data;
1167         struct nvmap_device *dev;
1168         struct dentry *nvmap_debug_root;
1169         unsigned int i;
1170         int e;
1171
1172         if (!plat) {
1173                 dev_err(&pdev->dev, "no platform data?\n");
1174                 return -ENODEV;
1175         }
1176
1177         if (WARN_ON(nvmap_dev != NULL)) {
1178                 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1179                 return -ENODEV;
1180         }
1181
1182         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1183         if (!dev) {
1184                 dev_err(&pdev->dev, "out of memory for device\n");
1185                 return -ENOMEM;
1186         }
1187
1188         dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1189         dev->dev_user.name = "nvmap";
1190         dev->dev_user.fops = &nvmap_user_fops;
1191         dev->dev_user.parent = &pdev->dev;
1192
1193         dev->dev_super.minor = MISC_DYNAMIC_MINOR;
1194         dev->dev_super.name = "knvmap";
1195         dev->dev_super.fops = &nvmap_super_fops;
1196         dev->dev_super.parent = &pdev->dev;
1197
1198         dev->handles = RB_ROOT;
1199
1200         init_waitqueue_head(&dev->pte_wait);
1201
1202         init_waitqueue_head(&dev->iovmm_master.pin_wait);
1203         mutex_init(&dev->iovmm_master.pin_lock);
1204 #ifdef CONFIG_NVMAP_PAGE_POOLS
1205         for (i = 0; i < NVMAP_NUM_POOLS; i++)
1206                 nvmap_page_pool_init(&dev->iovmm_master.pools[i], i);
1207 #endif
1208
1209         dev->iovmm_master.iovmm =
1210                 tegra_iovmm_alloc_client(&pdev->dev, NULL,
1211                         &(dev->dev_user));
1212 #if defined(CONFIG_TEGRA_IOVMM) || defined(CONFIG_IOMMU_API)
1213         if (!dev->iovmm_master.iovmm) {
1214                 e = PTR_ERR(dev->iovmm_master.iovmm);
1215                 dev_err(&pdev->dev, "couldn't create iovmm client\n");
1216                 goto fail;
1217         }
1218 #endif
1219         dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE, NULL);
1220         if (!dev->vm_rgn) {
1221                 e = -ENOMEM;
1222                 dev_err(&pdev->dev, "couldn't allocate remapping region\n");
1223                 goto fail;
1224         }
1225         e = nvmap_mru_init(&dev->iovmm_master);
1226         if (e) {
1227                 dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
1228                 goto fail;
1229         }
1230
1231         spin_lock_init(&dev->ptelock);
1232         spin_lock_init(&dev->handle_lock);
1233         INIT_LIST_HEAD(&dev->clients);
1234         spin_lock_init(&dev->clients_lock);
1235
1236         for (i = 0; i < NVMAP_NUM_PTES; i++) {
1237                 unsigned long addr;
1238                 pgd_t *pgd;
1239                 pud_t *pud;
1240                 pmd_t *pmd;
1241
1242                 addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
1243                 pgd = pgd_offset_k(addr);
1244                 pud = pud_alloc(&init_mm, pgd, addr);
1245                 if (!pud) {
1246                         e = -ENOMEM;
1247                         dev_err(&pdev->dev, "couldn't allocate page tables\n");
1248                         goto fail;
1249                 }
1250                 pmd = pmd_alloc(&init_mm, pud, addr);
1251                 if (!pmd) {
1252                         e = -ENOMEM;
1253                         dev_err(&pdev->dev, "couldn't allocate page tables\n");
1254                         goto fail;
1255                 }
1256                 dev->ptes[i] = pte_alloc_kernel(pmd, addr);
1257                 if (!dev->ptes[i]) {
1258                         e = -ENOMEM;
1259                         dev_err(&pdev->dev, "couldn't allocate page tables\n");
1260                         goto fail;
1261                 }
1262         }
1263
1264         e = misc_register(&dev->dev_user);
1265         if (e) {
1266                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1267                         dev->dev_user.name);
1268                 goto fail;
1269         }
1270
1271         e = misc_register(&dev->dev_super);
1272         if (e) {
1273                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1274                         dev->dev_super.name);
1275                 goto fail;
1276         }
1277
1278         dev->nr_carveouts = 0;
1279         dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1280                              plat->nr_carveouts, GFP_KERNEL);
1281         if (!dev->heaps) {
1282                 e = -ENOMEM;
1283                 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1284                 goto fail;
1285         }
1286
1287         nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1288         if (IS_ERR_OR_NULL(nvmap_debug_root))
1289                 dev_err(&pdev->dev, "couldn't create debug files\n");
1290
1291         for (i = 0; i < plat->nr_carveouts; i++) {
1292                 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1293                 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1294                 if (!co->size)
1295                         continue;
1296                 node->carveout = nvmap_heap_create(dev->dev_user.this_device,
1297                                    co->name, co->base, co->size,
1298                                    co->buddy_size, node);
1299                 if (!node->carveout) {
1300                         e = -ENOMEM;
1301                         dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1302                         goto fail_heaps;
1303                 }
1304                 node->index = dev->nr_carveouts;
1305                 dev->nr_carveouts++;
1306                 spin_lock_init(&node->clients_lock);
1307                 INIT_LIST_HEAD(&node->clients);
1308                 node->heap_bit = co->usage_mask;
1309                 if (nvmap_heap_create_group(node->carveout,
1310                                             &heap_extra_attr_group))
1311                         dev_warn(&pdev->dev, "couldn't add extra attributes\n");
1312
1313                 dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
1314                          co->name, co->size / 1024);
1315
1316                 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1317                         struct dentry *heap_root =
1318                                 debugfs_create_dir(co->name, nvmap_debug_root);
1319                         if (!IS_ERR_OR_NULL(heap_root)) {
1320                                 debugfs_create_file("clients", S_IRUGO,
1321                                         heap_root, node, &debug_clients_fops);
1322                                 debugfs_create_file("allocations", S_IRUGO,
1323                                     heap_root, node, &debug_allocations_fops);
1324                         }
1325                 }
1326         }
1327         if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1328                 struct dentry *iovmm_root =
1329                         debugfs_create_dir("iovmm", nvmap_debug_root);
1330                 if (!IS_ERR_OR_NULL(iovmm_root)) {
1331                         debugfs_create_file("clients", S_IRUGO, iovmm_root,
1332                                 dev, &debug_iovmm_clients_fops);
1333                         debugfs_create_file("allocations", S_IRUGO, iovmm_root,
1334                                 dev, &debug_iovmm_allocations_fops);
1335 #ifdef CONFIG_NVMAP_PAGE_POOLS
1336                         for (i = 0; i < NVMAP_NUM_POOLS; i++) {
1337                                 char name[40];
1338                                 char *memtype_string[] = {"uc", "wc",
1339                                                           "iwb", "wb"};
1340                                 sprintf(name, "%s_page_pool_available_pages",
1341                                         memtype_string[i]);
1342                                 debugfs_create_u32(name, S_IRUGO,
1343                                         iovmm_root,
1344                                         &dev->iovmm_master.pools[i].npages);
1345                         }
1346 #endif
1347                 }
1348         }
1349
1350         platform_set_drvdata(pdev, dev);
1351         nvmap_dev = dev;
1352
1353         return 0;
1354 fail_heaps:
1355         for (i = 0; i < dev->nr_carveouts; i++) {
1356                 struct nvmap_carveout_node *node = &dev->heaps[i];
1357                 nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
1358                 nvmap_heap_destroy(node->carveout);
1359         }
1360 fail:
1361         kfree(dev->heaps);
1362         nvmap_mru_destroy(&dev->iovmm_master);
1363         if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
1364                 misc_deregister(&dev->dev_super);
1365         if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1366                 misc_deregister(&dev->dev_user);
1367         if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
1368                 tegra_iovmm_free_client(dev->iovmm_master.iovmm);
1369         if (dev->vm_rgn)
1370                 free_vm_area(dev->vm_rgn);
1371         kfree(dev);
1372         nvmap_dev = NULL;
1373         return e;
1374 }
1375
1376 static int nvmap_remove(struct platform_device *pdev)
1377 {
1378         struct nvmap_device *dev = platform_get_drvdata(pdev);
1379         struct rb_node *n;
1380         struct nvmap_handle *h;
1381         int i;
1382
1383         misc_deregister(&dev->dev_super);
1384         misc_deregister(&dev->dev_user);
1385
1386         while ((n = rb_first(&dev->handles))) {
1387                 h = rb_entry(n, struct nvmap_handle, node);
1388                 rb_erase(&h->node, &dev->handles);
1389                 kfree(h);
1390         }
1391
1392         if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
1393                 tegra_iovmm_free_client(dev->iovmm_master.iovmm);
1394
1395         nvmap_mru_destroy(&dev->iovmm_master);
1396
1397         for (i = 0; i < dev->nr_carveouts; i++) {
1398                 struct nvmap_carveout_node *node = &dev->heaps[i];
1399                 nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
1400                 nvmap_heap_destroy(node->carveout);
1401         }
1402         kfree(dev->heaps);
1403
1404         free_vm_area(dev->vm_rgn);
1405         kfree(dev);
1406         nvmap_dev = NULL;
1407         return 0;
1408 }
1409
1410 static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1411 {
1412         return 0;
1413 }
1414
1415 static int nvmap_resume(struct platform_device *pdev)
1416 {
1417         return 0;
1418 }
1419
1420 static struct platform_driver nvmap_driver = {
1421         .probe          = nvmap_probe,
1422         .remove         = nvmap_remove,
1423         .suspend        = nvmap_suspend,
1424         .resume         = nvmap_resume,
1425
1426         .driver = {
1427                 .name   = "tegra-nvmap",
1428                 .owner  = THIS_MODULE,
1429         },
1430 };
1431
1432 static int __init nvmap_init_driver(void)
1433 {
1434         int e;
1435
1436         nvmap_dev = NULL;
1437
1438         e = nvmap_heap_init();
1439         if (e)
1440                 goto fail;
1441
1442         e = platform_driver_register(&nvmap_driver);
1443         if (e) {
1444                 nvmap_heap_deinit();
1445                 goto fail;
1446         }
1447
1448 fail:
1449         return e;
1450 }
1451 fs_initcall(nvmap_init_driver);
1452
1453 static void __exit nvmap_exit_driver(void)
1454 {
1455         platform_driver_unregister(&nvmap_driver);
1456         nvmap_heap_deinit();
1457         nvmap_dev = NULL;
1458 }
1459 module_exit(nvmap_exit_driver);