video: tegra: nvmap: Fix per client accounting
/*
 * drivers/video/tegra/nvmap/nvmap_dev.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/backing-dev.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap.h"
#include "nvmap_ioctl.h"
#include "nvmap_mru.h"

#define NVMAP_NUM_PTES          64

struct nvmap_carveout_node {
        unsigned int            heap_bit;
        struct nvmap_heap       *carveout;
        int                     index;
        struct list_head        clients;
        spinlock_t              clients_lock;
};

struct nvmap_device {
        struct vm_struct *vm_rgn;
        pte_t           *ptes[NVMAP_NUM_PTES];
        unsigned long   ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
        unsigned int    lastpte;
        spinlock_t      ptelock;

        struct rb_root  handles;
        spinlock_t      handle_lock;
        wait_queue_head_t pte_wait;
        struct miscdevice dev_super;
        struct miscdevice dev_user;
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
        struct nvmap_share iovmm_master;
};

struct nvmap_device *nvmap_dev;

static struct backing_dev_info nvmap_bdi = {
        .ra_pages       = 0,
        .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
                           BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
};

static int nvmap_open(struct inode *inode, struct file *filp);
static int nvmap_release(struct inode *inode, struct file *filp);
static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
static void nvmap_vma_open(struct vm_area_struct *vma);
static void nvmap_vma_close(struct vm_area_struct *vma);
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct file_operations nvmap_user_fops = {
        .owner          = THIS_MODULE,
        .open           = nvmap_open,
        .release        = nvmap_release,
        .unlocked_ioctl = nvmap_ioctl,
        .mmap           = nvmap_map,
};

static const struct file_operations nvmap_super_fops = {
        .owner          = THIS_MODULE,
        .open           = nvmap_open,
        .release        = nvmap_release,
        .unlocked_ioctl = nvmap_ioctl,
        .mmap           = nvmap_map,
};

static struct vm_operations_struct nvmap_vma_ops = {
        .open           = nvmap_vma_open,
        .close          = nvmap_vma_close,
        .fault          = nvmap_vma_fault,
};

int is_nvmap_vma(struct vm_area_struct *vma)
{
        return vma->vm_ops == &nvmap_vma_ops;
}

struct device *nvmap_client_to_device(struct nvmap_client *client)
{
        if (client->super)
                return client->dev->dev_super.this_device;
        else
                return client->dev->dev_user.this_device;
}

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
{
        return &dev->iovmm_master;
}

/* allocates a PTE for the caller's use; returns the PTE pointer or
 * a negative errno. may be called from IRQs */
pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
{
        unsigned long flags;
        unsigned long bit;

        spin_lock_irqsave(&dev->ptelock, flags);
        bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
        if (bit == NVMAP_NUM_PTES) {
                bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
                if (bit == dev->lastpte)
                        bit = NVMAP_NUM_PTES;
        }

        if (bit == NVMAP_NUM_PTES) {
                spin_unlock_irqrestore(&dev->ptelock, flags);
                return ERR_PTR(-ENOMEM);
        }

        dev->lastpte = bit;
        set_bit(bit, dev->ptebits);
        spin_unlock_irqrestore(&dev->ptelock, flags);

        *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
        return &(dev->ptes[bit]);
}

/* allocates a PTE for the caller's use; returns the PTE pointer or
 * a negative errno. must be called from sleepable contexts */
pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
{
        int ret;
        pte_t **pte;
        ret = wait_event_interruptible(dev->pte_wait,
                        !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));

        if (ret == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        return pte;
}

/* frees a PTE */
void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
{
        unsigned long addr;
        unsigned int bit = pte - dev->ptes;
        unsigned long flags;

        if (WARN_ON(bit >= NVMAP_NUM_PTES))
                return;

        addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
        set_pte_at(&init_mm, addr, *pte, 0);

        spin_lock_irqsave(&dev->ptelock, flags);
        clear_bit(bit, dev->ptebits);
        spin_unlock_irqrestore(&dev->ptelock, flags);
        wake_up(&dev->pte_wait);
}
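
/*
 * Illustrative sketch (not part of the driver): a typical caller pairs
 * nvmap_alloc_pte()/nvmap_free_pte() to borrow a scratch kernel mapping
 * for one physical page, much like nvmap_flush_heap_block() below does.
 * "phys" and "vaddr" here are hypothetical locals, not driver API.
 *
 *      void *vaddr;
 *      pte_t **pte = nvmap_alloc_pte(dev, &vaddr);
 *      if (IS_ERR(pte))
 *              return PTR_ERR(pte);
 *      set_pte_at(&init_mm, (unsigned long)vaddr, *pte,
 *                 pfn_pte(__phys_to_pfn(phys), pgprot_kernel));
 *      flush_tlb_kernel_page((unsigned long)vaddr);
 *      ... access the page through vaddr ...
 *      nvmap_free_pte(dev, pte);
 */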

/* verifies that the handle ref value "ref" is a valid handle ref for the
 * file. caller must hold the file's ref_lock prior to calling this function */
struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
                                                   unsigned long id)
{
        struct rb_node *n = c->handle_refs.rb_node;

        while (n) {
                struct nvmap_handle_ref *ref;
                ref = rb_entry(n, struct nvmap_handle_ref, node);
                if ((unsigned long)ref->handle == id)
                        return ref;
                else if (id > (unsigned long)ref->handle)
                        n = n->rb_right;
                else
                        n = n->rb_left;
        }

        return NULL;
}

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
                                         unsigned long id)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h = NULL;

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, id);
        if (ref)
                h = ref->handle;
        if (h)
                h = nvmap_handle_get(h);
        nvmap_ref_unlock(client);
        return h;
}

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b)
{
        struct nvmap_heap *h = nvmap_block_to_heap(b);
        struct nvmap_carveout_node *n;
        int i;

        for (i = 0; i < c->dev->nr_carveouts; i++) {
                n = &c->dev->heaps[i];
                if (n->carveout == h)
                        return n->heap_bit;
        }
        return 0;
}

static int nvmap_flush_heap_block(struct nvmap_client *client,
                                  struct nvmap_heap_block *block, size_t len)
{
        pte_t **pte;
        void *addr;
        unsigned long kaddr;
        unsigned long phys = block->base;
        unsigned long end = block->base + len;

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        kaddr = (unsigned long)addr;

        while (phys < end) {
                unsigned long next = (phys + PAGE_SIZE) & PAGE_MASK;
                unsigned long pfn = __phys_to_pfn(phys);
                void *base = (void *)kaddr + (phys & ~PAGE_MASK);

                next = min(next, end);
                set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
                flush_tlb_kernel_page(kaddr);
                __cpuc_flush_dcache_area(base, next - phys);
                phys = next;
        }

        outer_flush_range(block->base, block->base + len);

        nvmap_free_pte(client->dev, pte);
        return 0;
}

void nvmap_carveout_commit_add(struct nvmap_client *client,
                               struct nvmap_carveout_node *node,
                               size_t len)
{
        unsigned long flags;

        nvmap_ref_lock(client);
        spin_lock_irqsave(&node->clients_lock, flags);
        BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
               client->carveout_commit[node->index].commit != 0);

        client->carveout_commit[node->index].commit += len;
        /* if this client isn't already on the list of clients for this heap,
           add it */
        if (list_empty(&client->carveout_commit[node->index].list)) {
                list_add(&client->carveout_commit[node->index].list,
                         &node->clients);
        }
        spin_unlock_irqrestore(&node->clients_lock, flags);
        nvmap_ref_unlock(client);
}

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
                                    struct nvmap_carveout_node *node,
                                    size_t len)
{
        unsigned long flags;

        if (!client)
                return;

        spin_lock_irqsave(&node->clients_lock, flags);
        client->carveout_commit[node->index].commit -= len;
        BUG_ON(client->carveout_commit[node->index].commit < 0);
        /* if this client no longer has any allocations in this carveout,
           remove it from the carveout's client list */
        if (!client->carveout_commit[node->index].commit)
                list_del_init(&client->carveout_commit[node->index].list);
        spin_unlock_irqrestore(&node->clients_lock, flags);
}

static struct nvmap_client *get_client_from_carveout_commit(
        struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
{
        struct nvmap_carveout_commit *first_commit = commit - node->index;
        return (void *)first_commit - offsetof(struct nvmap_client,
                                               carveout_commit);
}
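
/*
 * Sketch of the per-client carveout accounting (illustrative only): each
 * nvmap_client embeds one nvmap_carveout_commit per carveout, indexed by
 * nvmap_carveout_node.index. A node's clients list links the commit
 * entries of every client that currently holds memory in that carveout,
 * so the owning client is recovered with pointer arithmetic, e.g.:
 *
 *      struct nvmap_carveout_commit *commit;
 *      list_for_each_entry(commit, &node->clients, list) {
 *              struct nvmap_client *client =
 *                      get_client_from_carveout_commit(node, commit);
 *              // client->carveout_commit[node->index].commit is the
 *              // number of bytes this client holds in this carveout
 *      }
 */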

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
                                              size_t len, size_t align,
                                              unsigned long usage,
                                              unsigned int prot)
{
        struct nvmap_carveout_node *co_heap;
        struct nvmap_device *dev = client->dev;
        int i;

        for (i = 0; i < dev->nr_carveouts; i++) {
                struct nvmap_heap_block *block;
                co_heap = &dev->heaps[i];

                if (!(co_heap->heap_bit & usage))
                        continue;

                block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
                if (block) {
                        /* flush any stale data that may be left in the
                         * cache at the block's address, since the new
                         * block may be mapped uncached */
                        if (nvmap_flush_heap_block(client, block, len)) {
                                nvmap_heap_free(block);
                                return NULL;
                        } else
                                return block;
                }
        }

        return NULL;
}

/* remove a handle from the device's tree of all handles; called
 * when freeing handles. */
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
{
        spin_lock(&dev->handle_lock);

        /* re-test inside the spinlock if the handle really has no clients;
         * only remove the handle if it is unreferenced */
        if (atomic_add_return(0, &h->ref) > 0) {
                spin_unlock(&dev->handle_lock);
                return -EBUSY;
        }
        smp_rmb();
        BUG_ON(atomic_read(&h->ref) < 0);
        BUG_ON(atomic_read(&h->pin) != 0);

        rb_erase(&h->node, &dev->handles);

        spin_unlock(&dev->handle_lock);
        return 0;
}

/* adds a newly-created handle to the device master tree */
void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;

        spin_lock(&dev->handle_lock);
        p = &dev->handles.rb_node;
        while (*p) {
                struct nvmap_handle *b;

                parent = *p;
                b = rb_entry(parent, struct nvmap_handle, node);
                if (h > b)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&h->node, parent, p);
        rb_insert_color(&h->node, &dev->handles);
        spin_unlock(&dev->handle_lock);
}

/* validates that a handle is in the device master tree, and that the
 * client has permission to access it */
struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
                                        unsigned long id)
{
        struct nvmap_handle *h = NULL;
        struct rb_node *n;

        spin_lock(&client->dev->handle_lock);

        n = client->dev->handles.rb_node;

        while (n) {
                h = rb_entry(n, struct nvmap_handle, node);
                if ((unsigned long)h == id) {
                        if (client->super || h->global || (h->owner == client))
                                h = nvmap_handle_get(h);
                        else
                                h = NULL;
                        spin_unlock(&client->dev->handle_lock);
                        return h;
                }
                if (id > (unsigned long)h)
                        n = n->rb_right;
                else
                        n = n->rb_left;
        }
        spin_unlock(&client->dev->handle_lock);
        return NULL;
}
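
/*
 * Sketch of the ID scheme (illustrative only): a handle ID is simply the
 * kernel pointer value of the nvmap_handle, so lookups walk the tree
 * keyed by pointer. A caller that takes a reference through
 * nvmap_validate_get() must drop it again with nvmap_handle_put(), e.g.:
 *
 *      struct nvmap_handle *h = nvmap_validate_get(client, id);
 *      if (!h)
 *              return -EPERM;
 *      ... use h ...
 *      nvmap_handle_put(h);
 */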

struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
                                         const char *name)
{
        struct nvmap_client *client;
        int i;

        if (WARN_ON(!dev))
                return NULL;

        client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
                         * dev->nr_carveouts), GFP_KERNEL);
        if (!client)
                return NULL;

        client->name = name;
        client->super = true;
        client->dev = dev;
        /* TODO: allocate unique IOVMM client for each nvmap client */
        client->share = &dev->iovmm_master;
        client->handle_refs = RB_ROOT;

        atomic_set(&client->iovm_commit, 0);

        client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);

        for (i = 0; i < dev->nr_carveouts; i++) {
                INIT_LIST_HEAD(&client->carveout_commit[i].list);
                client->carveout_commit[i].commit = 0;
        }

        get_task_struct(current);
        client->task = current;

        spin_lock_init(&client->ref_lock);
        atomic_set(&client->count, 1);

        return client;
}

static void destroy_client(struct nvmap_client *client)
{
        struct rb_node *n;
        int i;

        if (!client)
                return;

        while ((n = rb_first(&client->handle_refs))) {
                struct nvmap_handle_ref *ref;
                int pins, dupes;

                ref = rb_entry(n, struct nvmap_handle_ref, node);
                rb_erase(&ref->node, &client->handle_refs);

                smp_rmb();
                pins = atomic_read(&ref->pin);

                mutex_lock(&ref->handle->lock);
                if (ref->handle->owner == client)
                        ref->handle->owner = NULL;
                mutex_unlock(&ref->handle->lock);

                while (pins--)
                        nvmap_unpin_handles(client, &ref->handle, 1);

                dupes = atomic_read(&ref->dupes);
                while (dupes--)
                        nvmap_handle_put(ref->handle);

                kfree(ref);
        }

        for (i = 0; i < client->dev->nr_carveouts; i++)
                list_del(&client->carveout_commit[i].list);

        if (client->task)
                put_task_struct(client->task);

        kfree(client);
}

struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
{
        if (WARN_ON(!client))
                return NULL;

        if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
                return NULL;

        return client;
}

struct nvmap_client *nvmap_client_get_file(int fd)
{
        struct nvmap_client *client = ERR_PTR(-EFAULT);
        struct file *f = fget(fd);
        if (!f)
                return ERR_PTR(-EINVAL);

        if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
                client = f->private_data;
                atomic_inc(&client->count);
        }

        fput(f);
        return client;
}

void nvmap_client_put(struct nvmap_client *client)
{
        if (!client)
                return;

        if (!atomic_dec_return(&client->count))
                destroy_client(client);
}
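
/*
 * Illustrative pairing for in-kernel users (a sketch, not a definitive
 * API contract): another driver handed an nvmap file descriptor can
 * resolve and hold the client as follows, dropping the reference when
 * it is done:
 *
 *      struct nvmap_client *client = nvmap_client_get_file(fd);
 *      if (IS_ERR(client))
 *              return PTR_ERR(client);
 *      ... use client ...
 *      nvmap_client_put(client);
 */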

static int nvmap_open(struct inode *inode, struct file *filp)
{
        struct miscdevice *miscdev = filp->private_data;
        struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
        struct nvmap_client *priv;
        int ret;

        ret = nonseekable_open(inode, filp);
        if (unlikely(ret))
                return ret;

        BUG_ON(dev != nvmap_dev);
        priv = nvmap_create_client(dev, "user");
        if (!priv)
                return -ENOMEM;

        priv->super = (filp->f_op == &nvmap_super_fops);

        filp->f_mapping->backing_dev_info = &nvmap_bdi;

        filp->private_data = priv;
        return 0;
}

static int nvmap_release(struct inode *inode, struct file *filp)
{
        nvmap_client_put(filp->private_data);
        return 0;
}

static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
{
        struct nvmap_vma_priv *priv;

        /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
         * will be stored in vm_private_data and faulted in. until the
         * ioctl is made, the VMA is mapped no-access */
        vma->vm_private_data = NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->offs = 0;
        priv->handle = NULL;
        atomic_set(&priv->count, 1);

        vma->vm_flags |= VM_SHARED;
        vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
        vma->vm_ops = &nvmap_vma_ops;
        vma->vm_private_data = priv;

        return 0;
}

static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int err = 0;
        void __user *uarg = (void __user *)arg;

        if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
                return -ENOTTY;

        if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
                return -ENOTTY;

        if (_IOC_DIR(cmd) & _IOC_READ)
                err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
        if (_IOC_DIR(cmd) & _IOC_WRITE)
                err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));

        if (err)
                return -EFAULT;

        switch (cmd) {
        case NVMAP_IOC_CLAIM:
                nvmap_warn(filp->private_data, "preserved handles not "
                           "supported\n");
                err = -ENODEV;
                break;
        case NVMAP_IOC_CREATE:
        case NVMAP_IOC_FROM_ID:
                err = nvmap_ioctl_create(filp, cmd, uarg);
                break;

        case NVMAP_IOC_GET_ID:
                err = nvmap_ioctl_getid(filp, uarg);
                break;

        case NVMAP_IOC_PARAM:
                err = nvmap_ioctl_get_param(filp, uarg);
                break;

        case NVMAP_IOC_UNPIN_MULT:
        case NVMAP_IOC_PIN_MULT:
                err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
                break;

        case NVMAP_IOC_ALLOC:
                err = nvmap_ioctl_alloc(filp, uarg);
                break;

        case NVMAP_IOC_FREE:
                err = nvmap_ioctl_free(filp, arg);
                break;

        case NVMAP_IOC_MMAP:
                err = nvmap_map_into_caller_ptr(filp, uarg);
                break;

        case NVMAP_IOC_WRITE:
        case NVMAP_IOC_READ:
                err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
                break;

        case NVMAP_IOC_CACHE:
                err = nvmap_ioctl_cache_maint(filp, uarg);
                break;

        default:
                return -ENOTTY;
        }
        return err;
}

/* to ensure that the backing store for the VMA isn't freed while a forked
 * reference still exists, nvmap_vma_open increments the reference count on
 * the VMA's private data (which in turn holds a reference on the handle),
 * and nvmap_vma_close decrements it. alternatively, we could disallow
 * copying of the VMA, or behave like pmem and zap the pages. FIXME.
 */
static void nvmap_vma_open(struct vm_area_struct *vma)
{
        struct nvmap_vma_priv *priv;

        priv = vma->vm_private_data;

        BUG_ON(!priv);

        atomic_inc(&priv->count);
}

static void nvmap_vma_close(struct vm_area_struct *vma)
{
        struct nvmap_vma_priv *priv = vma->vm_private_data;

        if (priv && !atomic_dec_return(&priv->count)) {
                if (priv->handle)
                        nvmap_handle_put(priv->handle);
                kfree(priv);
        }

        vma->vm_private_data = NULL;
}

static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct nvmap_vma_priv *priv;
        unsigned long offs;

        offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
        priv = vma->vm_private_data;
        if (!priv || !priv->handle || !priv->handle->alloc)
                return VM_FAULT_SIGBUS;

        offs += priv->offs;
        /* if the VMA was split for some reason, vm_pgoff will be the VMA's
         * offset from the original VMA */
        offs += (vma->vm_pgoff << PAGE_SHIFT);

        if (offs >= priv->handle->size)
                return VM_FAULT_SIGBUS;

        if (!priv->handle->heap_pgalloc) {
                unsigned long pfn;
                BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
                pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
                vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
                return VM_FAULT_NOPAGE;
        } else {
                struct page *page;
                offs >>= PAGE_SHIFT;
                page = priv->handle->pgalloc.pages[offs];
                if (page)
                        get_page(page);
                vmf->page = page;
                return (page) ? 0 : VM_FAULT_SIGBUS;
        }
}

static ssize_t attr_show_usage(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);

        return sprintf(buf, "%08x\n", node->heap_bit);
}

static struct device_attribute heap_attr_show_usage =
        __ATTR(usage, S_IRUGO, attr_show_usage, NULL);

static struct attribute *heap_extra_attrs[] = {
        &heap_attr_show_usage.attr,
        NULL,
};

static struct attribute_group heap_extra_attr_group = {
        .attrs = heap_extra_attrs,
};

static void client_stringify(struct nvmap_client *client, struct seq_file *s)
{
        char task_comm[sizeof(client->task->comm)];
        get_task_comm(task_comm, client->task);
        seq_printf(s, "%8s %16s %8u", client->name, task_comm,
                   client->task->pid);
}

static void allocations_stringify(struct nvmap_client *client,
                                  struct seq_file *s)
{
        struct rb_node *n = rb_first(&client->handle_refs);
        unsigned long long total = 0;

        for (; n != NULL; n = rb_next(n)) {
                struct nvmap_handle_ref *ref =
                        rb_entry(n, struct nvmap_handle_ref, node);
                struct nvmap_handle *handle = ref->handle;
                if (handle->alloc && !handle->heap_pgalloc) {
                        seq_printf(s, " %8u@%8lx ", handle->size,
                                   handle->carveout->base);
                        total += handle->size;
                }
        }
        seq_printf(s, " total: %llu\n", total);
}

static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
{
        struct nvmap_carveout_node *node = s->private;
        struct nvmap_carveout_commit *commit;
        unsigned long flags;

        spin_lock_irqsave(&node->clients_lock, flags);
        list_for_each_entry(commit, &node->clients, list) {
                struct nvmap_client *client =
                        get_client_from_carveout_commit(node, commit);
                client_stringify(client, s);
                allocations_stringify(client, s);
        }
        spin_unlock_irqrestore(&node->clients_lock, flags);

        return 0;
}

static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
{
        return single_open(file, nvmap_debug_allocations_show,
                           inode->i_private);
}

static struct file_operations debug_allocations_fops = {
        .open = nvmap_debug_allocations_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
{
        struct nvmap_carveout_node *node = s->private;
        struct nvmap_carveout_commit *commit;
        unsigned long flags;

        spin_lock_irqsave(&node->clients_lock, flags);
        list_for_each_entry(commit, &node->clients, list) {
                struct nvmap_client *client =
                        get_client_from_carveout_commit(node, commit);
                client_stringify(client, s);
                seq_printf(s, " %8u\n", commit->commit);
        }
        spin_unlock_irqrestore(&node->clients_lock, flags);

        return 0;
}

static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
{
        return single_open(file, nvmap_debug_clients_show, inode->i_private);
}

static struct file_operations debug_clients_fops = {
        .open = nvmap_debug_clients_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
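
/*
 * Sketch of the resulting debugfs layout (assuming debugfs is mounted at
 * /sys/kernel/debug): nvmap_probe() below creates one directory per
 * carveout, each exposing the per-client accounting maintained above:
 *
 *      /sys/kernel/debug/nvmap/<carveout-name>/clients
 *              one line per client: name, task comm, pid, committed bytes
 *      /sys/kernel/debug/nvmap/<carveout-name>/allocations
 *              per client: each carveout allocation as size@base, plus a
 *              running total
 */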

static int nvmap_probe(struct platform_device *pdev)
{
        struct nvmap_platform_data *plat = pdev->dev.platform_data;
        struct nvmap_device *dev;
        struct dentry *nvmap_debug_root;
        unsigned int i;
        int e;

        if (!plat) {
                dev_err(&pdev->dev, "no platform data?\n");
                return -ENODEV;
        }

        if (WARN_ON(nvmap_dev != NULL)) {
                dev_err(&pdev->dev, "only one nvmap device may be present\n");
                return -ENODEV;
        }

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                dev_err(&pdev->dev, "out of memory for device\n");
                return -ENOMEM;
        }

        dev->dev_user.minor = MISC_DYNAMIC_MINOR;
        dev->dev_user.name = "nvmap";
        dev->dev_user.fops = &nvmap_user_fops;
        dev->dev_user.parent = &pdev->dev;

        dev->dev_super.minor = MISC_DYNAMIC_MINOR;
        dev->dev_super.name = "knvmap";
        dev->dev_super.fops = &nvmap_super_fops;
        dev->dev_super.parent = &pdev->dev;

        dev->handles = RB_ROOT;

        init_waitqueue_head(&dev->pte_wait);

        init_waitqueue_head(&dev->iovmm_master.pin_wait);
        mutex_init(&dev->iovmm_master.pin_lock);
        dev->iovmm_master.iovmm =
                tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL);
        if (IS_ERR(dev->iovmm_master.iovmm)) {
                e = PTR_ERR(dev->iovmm_master.iovmm);
                dev_err(&pdev->dev, "couldn't create iovmm client\n");
                goto fail;
        }
        dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
        if (!dev->vm_rgn) {
                e = -ENOMEM;
                dev_err(&pdev->dev, "couldn't allocate remapping region\n");
                goto fail;
        }
        e = nvmap_mru_init(&dev->iovmm_master);
        if (e) {
                dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
                goto fail;
        }

        spin_lock_init(&dev->ptelock);
        spin_lock_init(&dev->handle_lock);

        for (i = 0; i < NVMAP_NUM_PTES; i++) {
                unsigned long addr;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
                pgd = pgd_offset_k(addr);
                pud = pud_alloc(&init_mm, pgd, addr);
                if (!pud) {
                        e = -ENOMEM;
                        dev_err(&pdev->dev, "couldn't allocate page tables\n");
                        goto fail;
                }
                pmd = pmd_alloc(&init_mm, pud, addr);
                if (!pmd) {
                        e = -ENOMEM;
                        dev_err(&pdev->dev, "couldn't allocate page tables\n");
                        goto fail;
                }
                dev->ptes[i] = pte_alloc_kernel(pmd, addr);
                if (!dev->ptes[i]) {
                        e = -ENOMEM;
                        dev_err(&pdev->dev, "couldn't allocate page tables\n");
                        goto fail;
                }
        }

        e = misc_register(&dev->dev_user);
        if (e) {
                dev_err(&pdev->dev, "unable to register miscdevice %s\n",
                        dev->dev_user.name);
                goto fail;
        }

        e = misc_register(&dev->dev_super);
        if (e) {
                dev_err(&pdev->dev, "unable to register miscdevice %s\n",
                        dev->dev_super.name);
                goto fail;
        }

        dev->nr_carveouts = 0;
        dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
                             plat->nr_carveouts, GFP_KERNEL);
        if (!dev->heaps) {
                e = -ENOMEM;
                dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
                goto fail;
        }

        nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
        if (IS_ERR_OR_NULL(nvmap_debug_root))
                dev_err(&pdev->dev, "couldn't create debug files\n");

        for (i = 0; i < plat->nr_carveouts; i++) {
                struct nvmap_carveout_node *node = &dev->heaps[i];
                const struct nvmap_platform_carveout *co = &plat->carveouts[i];
                node->carveout = nvmap_heap_create(dev->dev_user.this_device,
                                   co->name, co->base, co->size,
                                   co->buddy_size, node);
                if (!node->carveout) {
                        e = -ENOMEM;
                        dev_err(&pdev->dev, "couldn't create %s\n", co->name);
                        goto fail_heaps;
                }
                dev->nr_carveouts++;
                spin_lock_init(&node->clients_lock);
                node->index = i;
                INIT_LIST_HEAD(&node->clients);
                node->heap_bit = co->usage_mask;
                if (nvmap_heap_create_group(node->carveout,
                                            &heap_extra_attr_group))
                        dev_warn(&pdev->dev, "couldn't add extra attributes\n");

                dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
                         co->name, co->size / 1024);

                if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
                        struct dentry *heap_root =
                                debugfs_create_dir(co->name, nvmap_debug_root);
                        if (!IS_ERR_OR_NULL(heap_root)) {
                                debugfs_create_file("clients", 0664, heap_root,
                                    node, &debug_clients_fops);
                                debugfs_create_file("allocations", 0664,
                                    heap_root, node, &debug_allocations_fops);
                        }
                }
        }

        platform_set_drvdata(pdev, dev);
        nvmap_dev = dev;
        return 0;
fail_heaps:
        for (i = 0; i < dev->nr_carveouts; i++) {
                struct nvmap_carveout_node *node = &dev->heaps[i];
                nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
                nvmap_heap_destroy(node->carveout);
        }
fail:
        kfree(dev->heaps);
        nvmap_mru_destroy(&dev->iovmm_master);
        if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
                misc_deregister(&dev->dev_super);
        if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
                misc_deregister(&dev->dev_user);
        if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
                tegra_iovmm_free_client(dev->iovmm_master.iovmm);
        if (dev->vm_rgn)
                free_vm_area(dev->vm_rgn);
        kfree(dev);
        nvmap_dev = NULL;
        return e;
}

static int nvmap_remove(struct platform_device *pdev)
{
        struct nvmap_device *dev = platform_get_drvdata(pdev);
        struct rb_node *n;
        struct nvmap_handle *h;
        int i;

        misc_deregister(&dev->dev_super);
        misc_deregister(&dev->dev_user);

        while ((n = rb_first(&dev->handles))) {
                h = rb_entry(n, struct nvmap_handle, node);
                rb_erase(&h->node, &dev->handles);
                kfree(h);
        }

        if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
                tegra_iovmm_free_client(dev->iovmm_master.iovmm);

        nvmap_mru_destroy(&dev->iovmm_master);

        for (i = 0; i < dev->nr_carveouts; i++) {
                struct nvmap_carveout_node *node = &dev->heaps[i];
                nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
                nvmap_heap_destroy(node->carveout);
        }
        kfree(dev->heaps);

        free_vm_area(dev->vm_rgn);
        kfree(dev);
        nvmap_dev = NULL;
        return 0;
}

static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
{
        return 0;
}

static int nvmap_resume(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver nvmap_driver = {
        .probe          = nvmap_probe,
        .remove         = nvmap_remove,
        .suspend        = nvmap_suspend,
        .resume         = nvmap_resume,

        .driver = {
                .name   = "tegra-nvmap",
                .owner  = THIS_MODULE,
        },
};

static int __init nvmap_init_driver(void)
{
        int e;

        nvmap_dev = NULL;

        e = nvmap_heap_init();
        if (e)
                goto fail;

        e = platform_driver_register(&nvmap_driver);
        if (e) {
                nvmap_heap_deinit();
                goto fail;
        }

fail:
        return e;
}
fs_initcall(nvmap_init_driver);

static void __exit nvmap_exit_driver(void)
{
        platform_driver_unregister(&nvmap_driver);
        nvmap_heap_deinit();
        nvmap_dev = NULL;
}
module_exit(nvmap_exit_driver);