/*
 * drivers/video/tegra/nvmap/nvmap_dev.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/backing-dev.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap.h"
#include "nvmap_ioctl.h"
#include "nvmap_mru.h"

#define NVMAP_NUM_PTES		64
#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */

#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
static bool carveout_killer = true;
#else
static bool carveout_killer;
#endif
module_param(carveout_killer, bool, 0640);

struct nvmap_carveout_node {
	unsigned int		heap_bit;
	struct nvmap_heap	*carveout;
	int			index;
	struct list_head	clients;
	spinlock_t		clients_lock;
};

struct nvmap_device {
	struct vm_struct *vm_rgn;
	pte_t		*ptes[NVMAP_NUM_PTES];
	unsigned long	ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
	unsigned int	lastpte;
	spinlock_t	ptelock;

	struct rb_root	handles;
	spinlock_t	handle_lock;
	wait_queue_head_t pte_wait;
	struct miscdevice dev_super;
	struct miscdevice dev_user;
	struct nvmap_carveout_node *heaps;
	int nr_carveouts;
	struct nvmap_share iovmm_master;
};
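
/*
 * nvmap_device is the single driver-wide state object: a small pool of
 * kernel PTE slots (vm_rgn/ptes/ptebits) for temporary mappings, the
 * device-wide rb-tree of all handles, the two misc devices (/dev/nvmap for
 * normal clients and the privileged /dev/knvmap), the carveout heap array,
 * and the shared IOVMM state. Only one instance is created, in nvmap_probe().
 */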

struct nvmap_device *nvmap_dev;

static struct backing_dev_info nvmap_bdi = {
	.ra_pages	= 0,
	.capabilities	= (BDI_CAP_NO_ACCT_AND_WRITEBACK |
			   BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
};

static int nvmap_open(struct inode *inode, struct file *filp);
static int nvmap_release(struct inode *inode, struct file *filp);
static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
static void nvmap_vma_open(struct vm_area_struct *vma);
static void nvmap_vma_close(struct vm_area_struct *vma);
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct file_operations nvmap_user_fops = {
	.owner		= THIS_MODULE,
	.open		= nvmap_open,
	.release	= nvmap_release,
	.unlocked_ioctl	= nvmap_ioctl,
	.mmap		= nvmap_map,
};

static const struct file_operations nvmap_super_fops = {
	.owner		= THIS_MODULE,
	.open		= nvmap_open,
	.release	= nvmap_release,
	.unlocked_ioctl	= nvmap_ioctl,
	.mmap		= nvmap_map,
};

static struct vm_operations_struct nvmap_vma_ops = {
	.open	= nvmap_vma_open,
	.close	= nvmap_vma_close,
	.fault	= nvmap_vma_fault,
};

int is_nvmap_vma(struct vm_area_struct *vma)
{
	return vma->vm_ops == &nvmap_vma_ops;
}

struct device *nvmap_client_to_device(struct nvmap_client *client)
{
	if (client->super)
		return client->dev->dev_super.this_device;
	else
		return client->dev->dev_user.this_device;
}

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
{
	return &dev->iovmm_master;
}

/* allocates a PTE for the caller's use; returns the PTE pointer or
 * a negative errno. may be called from IRQs */
pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
{
	unsigned long flags;
	unsigned long bit;

	spin_lock_irqsave(&dev->ptelock, flags);
	bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
	if (bit == NVMAP_NUM_PTES) {
		bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
		if (bit == dev->lastpte)
			bit = NVMAP_NUM_PTES;
	}

	if (bit == NVMAP_NUM_PTES) {
		spin_unlock_irqrestore(&dev->ptelock, flags);
		return ERR_PTR(-ENOMEM);
	}

	dev->lastpte = bit;
	set_bit(bit, dev->ptebits);
	spin_unlock_irqrestore(&dev->ptelock, flags);

	*vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
	return &(dev->ptes[bit]);
}
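
/*
 * Note that the bitmap scan above starts at dev->lastpte and wraps around,
 * so successive allocations rotate through the NVMAP_NUM_PTES slots rather
 * than always reusing the lowest free one; presumably this spreads the
 * set_pte_at()/TLB churn across the whole remapping window.
 */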

/* allocates a PTE for the caller's use; returns the PTE pointer or
 * a negative errno. must be called from sleepable contexts */
pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
{
	int ret;
	pte_t **pte;

	ret = wait_event_interruptible(dev->pte_wait,
			!IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));

	if (ret == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	return pte;
}

/* frees a PTE */
void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
{
	unsigned long addr;
	unsigned int bit = pte - dev->ptes;
	unsigned long flags;

	if (WARN_ON(bit >= NVMAP_NUM_PTES))
		return;

	addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
	set_pte_at(&init_mm, addr, *pte, 0);

	spin_lock_irqsave(&dev->ptelock, flags);
	clear_bit(bit, dev->ptebits);
	spin_unlock_irqrestore(&dev->ptelock, flags);
	wake_up(&dev->pte_wait);
}
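
/*
 * Typical use of the PTE pool (a minimal sketch, not a function in this
 * file): allocate a slot, point it at a physical page with set_pte_at(),
 * flush the TLB entry for that virtual address, access the page through
 * the returned vaddr, then release the slot:
 *
 *	pte_t **pte;
 *	void *vaddr;
 *
 *	pte = nvmap_alloc_pte(dev, &vaddr);
 *	if (IS_ERR(pte))
 *		return PTR_ERR(pte);
 *	set_pte_at(&init_mm, (unsigned long)vaddr, *pte,
 *		   pfn_pte(pfn, pgprot_kernel));
 *	flush_tlb_kernel_page((unsigned long)vaddr);
 *	... read or write the page through vaddr ...
 *	nvmap_free_pte(dev, pte);
 *
 * nvmap_flush_heap_block() below follows exactly this pattern.
 */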

/* verifies that the handle ref value "ref" is a valid handle ref for the
 * file. caller must hold the file's ref_lock prior to calling this function */
struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
						   unsigned long id)
{
	struct rb_node *n = c->handle_refs.rb_node;

	while (n) {
		struct nvmap_handle_ref *ref;
		ref = rb_entry(n, struct nvmap_handle_ref, node);
		if ((unsigned long)ref->handle == id)
			return ref;
		else if (id > (unsigned long)ref->handle)
			n = n->rb_right;
		else
			n = n->rb_left;
	}

	return NULL;
}
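
/*
 * Handle "ids" exchanged with user space are simply the kernel addresses of
 * the underlying nvmap_handle objects cast to unsigned long, which is why
 * both this per-client rb-tree and the device-wide tree walked by
 * nvmap_validate_get() can compare id directly against the handle pointer.
 */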

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
					 unsigned long id)
{
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h = NULL;

	nvmap_ref_lock(client);
	ref = _nvmap_validate_id_locked(client, id);
	if (ref)
		h = ref->handle;
	if (h)
		h = nvmap_handle_get(h);
	nvmap_ref_unlock(client);
	return h;
}

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b)
{
	struct nvmap_heap *h = nvmap_block_to_heap(b);
	struct nvmap_carveout_node *n;
	int i;

	for (i = 0; i < c->dev->nr_carveouts; i++) {
		n = &c->dev->heaps[i];
		if (n->carveout == h)
			return n->heap_bit;
	}
	return 0;
}

static int nvmap_flush_heap_block(struct nvmap_client *client,
				  struct nvmap_heap_block *block, size_t len)
{
	pte_t **pte;
	void *addr;
	unsigned long kaddr;
	unsigned long phys = block->base;
	unsigned long end = block->base + len;

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	kaddr = (unsigned long)addr;

	while (phys < end) {
		unsigned long next = (phys + PAGE_SIZE) & PAGE_MASK;
		unsigned long pfn = __phys_to_pfn(phys);
		void *base = (void *)kaddr + (phys & ~PAGE_MASK);

		next = min(next, end);
		set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
		flush_tlb_kernel_page(kaddr);
		__cpuc_flush_dcache_area(base, next - phys);
		phys = next;
	}

	outer_flush_range(block->base, block->base + len);

	nvmap_free_pte(client->dev, pte);
	return 0;
}
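
/*
 * The loop above maps the carveout block one page at a time through a
 * borrowed PTE slot and cleans the L1 with __cpuc_flush_dcache_area();
 * outer_flush_range() then handles the outer (L2) cache by physical address
 * in a single pass. This keeps stale dirty lines from corrupting a block
 * that its new owner may map uncached or write-combined.
 */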

void nvmap_carveout_commit_add(struct nvmap_client *client,
			       struct nvmap_carveout_node *node,
			       size_t len)
{
	unsigned long flags;

	nvmap_ref_lock(client);
	spin_lock_irqsave(&node->clients_lock, flags);
	BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
	       client->carveout_commit[node->index].commit != 0);

	client->carveout_commit[node->index].commit += len;
	/* if this client isn't already on the list of nodes for this heap,
	   add it */
	if (list_empty(&client->carveout_commit[node->index].list)) {
		list_add(&client->carveout_commit[node->index].list,
			 &node->clients);
	}
	spin_unlock_irqrestore(&node->clients_lock, flags);
	nvmap_ref_unlock(client);
}

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
				    struct nvmap_carveout_node *node,
				    size_t len)
{
	unsigned long flags;

	if (!client)
		return;

	spin_lock_irqsave(&node->clients_lock, flags);
	client->carveout_commit[node->index].commit -= len;
	BUG_ON(client->carveout_commit[node->index].commit < 0);
	/* if no more allocation in this carveout for this node, delete it */
	if (!client->carveout_commit[node->index].commit)
		list_del_init(&client->carveout_commit[node->index].list);
	spin_unlock_irqrestore(&node->clients_lock, flags);
}

static struct nvmap_client *get_client_from_carveout_commit(
	struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
{
	struct nvmap_carveout_commit *first_commit = commit - node->index;
	return (void *)first_commit - offsetof(struct nvmap_client,
					       carveout_commit);
}
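
/*
 * The pointer arithmetic above works because carveout_commit[] is an array
 * allocated at the tail of struct nvmap_client (nvmap_create_client()
 * kzalloc()s the client together with one nvmap_carveout_commit per
 * carveout). Stepping back node->index entries yields
 * &client->carveout_commit[0]; subtracting that member's offset recovers
 * the client, i.e. an open-coded container_of() on an array element.
 */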

static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
static int wait_count;
bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
{
	struct nvmap_carveout_commit *commit;
	size_t selected_size = 0;
	int selected_oom_adj = OOM_ADJUST_MIN;
	struct task_struct *selected_task = NULL;
	unsigned long flags;
	bool wait = false;
	int current_oom_adj = OOM_ADJUST_MIN;

	task_lock(current);
	if (current->signal)
		current_oom_adj = current->signal->oom_adj;
	task_unlock(current);

	spin_lock_irqsave(&node->clients_lock, flags);
	/* find the task with the smallest oom_adj (lowest priority)
	 * and largest carveout allocation -- ignore kernel allocations,
	 * there's no way to handle them */
	list_for_each_entry(commit, &node->clients, list) {
		struct nvmap_client *client =
			get_client_from_carveout_commit(node, commit);
		size_t size = commit->commit;
		struct task_struct *task = client->task;
		struct signal_struct *sig;

		if (!task)
			continue;

		task_lock(task);
		sig = task->signal;
		if (!task->mm || !sig)
			goto end;
		/* don't try to kill current */
		if (task == current->group_leader)
			goto end;
		/* don't try to kill higher priority tasks */
		if (sig->oom_adj < current_oom_adj)
			goto end;
		if (sig->oom_adj < selected_oom_adj)
			goto end;
		if (sig->oom_adj == selected_oom_adj &&
		    size <= selected_size)
			goto end;
		selected_oom_adj = sig->oom_adj;
		selected_size = size;
		selected_task = task;
end:
		task_unlock(task);
	}
	if (selected_task) {
		wait = true;
		if (fatal_signal_pending(selected_task)) {
			pr_warning("carveout_killer: process %d dying "
				   "slowly\n", selected_task->pid);
			goto out;
		}
		pr_info("carveout_killer: killing process %d with oom_adj %d "
			"to reclaim %d (for process with oom_adj %d)\n",
			selected_task->pid, selected_oom_adj,
			selected_size, current_oom_adj);
		force_sig(SIGKILL, selected_task);
	}
out:
	spin_unlock_irqrestore(&node->clients_lock, flags);
	return wait;
}

struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
						 size_t len, size_t align,
						 unsigned long usage,
						 unsigned int prot)
{
	struct nvmap_carveout_node *co_heap;
	struct nvmap_device *dev = client->dev;
	int i;

	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_heap_block *block;
		co_heap = &dev->heaps[i];

		if (!(co_heap->heap_bit & usage))
			continue;

		block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
		if (block) {
			/* flush any stale data that may be left in the
			 * cache at the block's address, since the new
			 * block may be mapped uncached */
			if (nvmap_flush_heap_block(client, block, len)) {
				nvmap_heap_free(block);
				block = NULL;
			} else
				return block;
		}
	}

	return NULL;
}

static bool nvmap_carveout_freed(int count)
{
	smp_rmb();
	return count != wait_count;
}

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
					      size_t len, size_t align,
					      unsigned long usage,
					      unsigned int prot)
{
	struct nvmap_heap_block *block;
	struct nvmap_carveout_node *co_heap;
	struct nvmap_device *dev = client->dev;
	int i;
	unsigned long end = jiffies +
		msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
	int count = 0;

	do {
		block = do_nvmap_carveout_alloc(client, len, align,
						usage, prot);
		if (!carveout_killer)
			return block;

		if (block)
			return block;

		if (!count++) {
			char task_comm[TASK_COMM_LEN];
			if (client->task)
				get_task_comm(task_comm, client->task);
			else
				strncpy(task_comm, "kernel", sizeof(task_comm));
			pr_info("%s: failed to allocate %u bytes for "
				"process %s, firing carveout "
				"killer!\n", __func__, len, task_comm);
		} else {
			pr_info("%s: still can't allocate %u bytes, "
				"attempt %d!\n", __func__, len, count);
		}

		/* shrink carveouts that matter and try again */
		for (i = 0; i < dev->nr_carveouts; i++) {
			int count;
			co_heap = &dev->heaps[i];

			if (!(co_heap->heap_bit & usage))
				continue;

			count = wait_count;
			/* indicates we didn't find anything to kill,
			   might as well stop trying */
			if (!nvmap_shrink_carveout(co_heap))
				return NULL;

			if (time_is_after_jiffies(end))
				wait_event_interruptible_timeout(wait_reclaim,
					nvmap_carveout_freed(count),
					end - jiffies);
		}
	} while (time_is_after_jiffies(end));

	if (time_is_before_jiffies(end))
		pr_info("carveout_killer: timeout expired without "
			"allocation succeeding.\n");

	return NULL;
}
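
/*
 * Summary of the retry policy above: with carveout_killer disabled the
 * result of the first allocation attempt is returned as-is. With it
 * enabled, each failed pass kills the lowest-priority (highest oom_adj)
 * client with the largest commit in every carveout matching the requested
 * usage mask, waits for wait_count to advance (destroy_client() bumps it
 * when a killed client's memory is actually released), and retries until
 * NVMAP_CARVEOUT_KILLER_RETRY_TIME expires.
 */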

/* remove a handle from the device's tree of all handles; called
 * when freeing handles. */
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
{
	spin_lock(&dev->handle_lock);

	/* re-test inside the spinlock if the handle really has no clients;
	 * only remove the handle if it is unreferenced */
	if (atomic_add_return(0, &h->ref) > 0) {
		spin_unlock(&dev->handle_lock);
		return -EBUSY;
	}
	smp_rmb();
	BUG_ON(atomic_read(&h->ref) < 0);
	BUG_ON(atomic_read(&h->pin) != 0);

	rb_erase(&h->node, &dev->handles);

	spin_unlock(&dev->handle_lock);
	return 0;
}

/* adds a newly-created handle to the device master tree */
void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;

	spin_lock(&dev->handle_lock);
	p = &dev->handles.rb_node;
	while (*p) {
		struct nvmap_handle *b;

		parent = *p;
		b = rb_entry(parent, struct nvmap_handle, node);
		if (h > b)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&h->node, parent, p);
	rb_insert_color(&h->node, &dev->handles);
	spin_unlock(&dev->handle_lock);
}

/* validates that a handle is in the device master tree, and that the
 * client has permission to access it */
struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
					unsigned long id)
{
	struct nvmap_handle *h = NULL;
	struct rb_node *n;

	spin_lock(&client->dev->handle_lock);

	n = client->dev->handles.rb_node;

	while (n) {
		h = rb_entry(n, struct nvmap_handle, node);
		if ((unsigned long)h == id) {
			if (client->super || h->global || (h->owner == client))
				h = nvmap_handle_get(h);
			else
				h = NULL;
			spin_unlock(&client->dev->handle_lock);
			return h;
		}
		if (id > (unsigned long)h)
			n = n->rb_right;
		else
			n = n->rb_left;
	}
	spin_unlock(&client->dev->handle_lock);
	return NULL;
}
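
/*
 * Access control: a handle found in the device tree is only handed back if
 * the caller opened the privileged /dev/knvmap node (client->super), the
 * handle was explicitly made global (h->global), or the caller owns it;
 * otherwise the lookup returns NULL even though the handle exists.
 */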

struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
					 const char *name)
{
	struct nvmap_client *client;
	struct task_struct *task;
	int i;

	if (WARN_ON(!dev))
		return NULL;

	client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
			 * dev->nr_carveouts), GFP_KERNEL);
	if (!client)
		return NULL;

	client->name = name;
	client->super = true;
	client->dev = dev;
	/* TODO: allocate unique IOVMM client for each nvmap client */
	client->share = &dev->iovmm_master;
	client->handle_refs = RB_ROOT;

	atomic_set(&client->iovm_commit, 0);

	client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);

	for (i = 0; i < dev->nr_carveouts; i++) {
		INIT_LIST_HEAD(&client->carveout_commit[i].list);
		client->carveout_commit[i].commit = 0;
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);
	client->task = task;

	spin_lock_init(&client->ref_lock);
	atomic_set(&client->count, 1);

	return client;
}

static void destroy_client(struct nvmap_client *client)
{
	struct rb_node *n;
	int i;

	if (!client)
		return;

	while ((n = rb_first(&client->handle_refs))) {
		struct nvmap_handle_ref *ref;
		int pins, dupes;

		ref = rb_entry(n, struct nvmap_handle_ref, node);
		rb_erase(&ref->node, &client->handle_refs);

		smp_rmb();
		pins = atomic_read(&ref->pin);

		mutex_lock(&ref->handle->lock);
		if (ref->handle->owner == client)
			ref->handle->owner = NULL;
		mutex_unlock(&ref->handle->lock);

		while (pins--)
			nvmap_unpin_handles(client, &ref->handle, 1);

		dupes = atomic_read(&ref->dupes);
		while (dupes--)
			nvmap_handle_put(ref->handle);

		nvmap_handle_put(ref->handle);
		kfree(ref);
	}

	if (carveout_killer) {
		wait_count++;
		smp_wmb();
		wake_up_all(&wait_reclaim);
	}

	for (i = 0; i < client->dev->nr_carveouts; i++)
		list_del(&client->carveout_commit[i].list);

	if (client->task)
		put_task_struct(client->task);

	kfree(client);
}

struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
{
	if (WARN_ON(!client))
		return NULL;

	if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
		return NULL;

	return client;
}

struct nvmap_client *nvmap_client_get_file(int fd)
{
	struct nvmap_client *client = ERR_PTR(-EFAULT);
	struct file *f = fget(fd);
	if (!f)
		return ERR_PTR(-EINVAL);

	if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
		client = f->private_data;
		atomic_inc(&client->count);
	}

	fput(f);
	return client;
}

void nvmap_client_put(struct nvmap_client *client)
{
	if (!client)
		return;

	if (!atomic_dec_return(&client->count))
		destroy_client(client);
}

static int nvmap_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
	struct nvmap_client *priv;
	int ret;

	ret = nonseekable_open(inode, filp);
	if (unlikely(ret))
		return ret;

	BUG_ON(dev != nvmap_dev);
	priv = nvmap_create_client(dev, "user");
	if (!priv)
		return -ENOMEM;

	priv->super = (filp->f_op == &nvmap_super_fops);

	filp->f_mapping->backing_dev_info = &nvmap_bdi;

	filp->private_data = priv;
	return 0;
}

static int nvmap_release(struct inode *inode, struct file *filp)
{
	nvmap_client_put(filp->private_data);
	return 0;
}

static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;

	/* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
	 * will be stored in vm_private_data and faulted in. until the
	 * ioctl is made, the VMA is mapped no-access */
	vma->vm_private_data = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->offs = 0;
	priv->handle = NULL;
	atomic_set(&priv->count, 1);

	vma->vm_flags |= VM_SHARED;
	vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
	vma->vm_ops = &nvmap_vma_ops;
	vma->vm_private_data = priv;

	return 0;
}

static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *uarg = (void __user *)arg;

	if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
		return -ENOTTY;

	if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
		return -ENOTTY;

	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
	if (_IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));

	if (err)
		return -EFAULT;

	switch (cmd) {
	case NVMAP_IOC_CLAIM:
		nvmap_warn(filp->private_data, "preserved handles not"
			   "supported\n");
		err = -ENODEV;
		break;
	case NVMAP_IOC_CREATE:
	case NVMAP_IOC_FROM_ID:
		err = nvmap_ioctl_create(filp, cmd, uarg);
		break;

	case NVMAP_IOC_GET_ID:
		err = nvmap_ioctl_getid(filp, uarg);
		break;

	case NVMAP_IOC_PARAM:
		err = nvmap_ioctl_get_param(filp, uarg);
		break;

	case NVMAP_IOC_UNPIN_MULT:
	case NVMAP_IOC_PIN_MULT:
		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
		break;

	case NVMAP_IOC_ALLOC:
		err = nvmap_ioctl_alloc(filp, uarg);
		break;

	case NVMAP_IOC_FREE:
		err = nvmap_ioctl_free(filp, arg);
		break;

	case NVMAP_IOC_MMAP:
		err = nvmap_map_into_caller_ptr(filp, uarg);
		break;

	case NVMAP_IOC_WRITE:
	case NVMAP_IOC_READ:
		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
		break;

	case NVMAP_IOC_CACHE:
		err = nvmap_ioctl_cache_maint(filp, uarg);
		break;

	default:
		return -ENOTTY;
	}
	return err;
}

/* to ensure that the backing store for the VMA isn't freed while a fork'd
 * reference still exists, nvmap_vma_open increments the reference count on
 * the handle, and nvmap_vma_close decrements it. alternatively, we could
 * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
*/
static void nvmap_vma_open(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;

	priv = vma->vm_private_data;

	BUG_ON(!priv);

	atomic_inc(&priv->count);
}

static void nvmap_vma_close(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv = vma->vm_private_data;

	if (priv && !atomic_dec_return(&priv->count)) {
		if (priv->handle)
			nvmap_handle_put(priv->handle);
		kfree(priv);
	}

	vma->vm_private_data = NULL;
}

static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct nvmap_vma_priv *priv;
	unsigned long offs;

	offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
	priv = vma->vm_private_data;
	if (!priv || !priv->handle || !priv->handle->alloc)
		return VM_FAULT_SIGBUS;

	offs += priv->offs;
	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
	 * offset from the original VMA */
	offs += (vma->vm_pgoff << PAGE_SHIFT);

	if (offs >= priv->handle->size)
		return VM_FAULT_SIGBUS;

	if (!priv->handle->heap_pgalloc) {
		unsigned long pfn;
		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
		return VM_FAULT_NOPAGE;
	} else {
		struct page *page;
		offs >>= PAGE_SHIFT;
		page = priv->handle->pgalloc.pages[offs];
		if (page)
			get_page(page);
		vmf->page = page;
		return (page) ? 0 : VM_FAULT_SIGBUS;
	}
}
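
/*
 * Two fault cases, then: carveout-backed handles insert the physical pfn
 * directly (VM_FAULT_NOPAGE, no struct page involved, which is why the VMA
 * is marked VM_MIXEDMAP in nvmap_map()), while page-allocated handles hand
 * the backing struct page to the core fault code with an extra reference.
 */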

static ssize_t attr_show_usage(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);

	return sprintf(buf, "%08x\n", node->heap_bit);
}

static struct device_attribute heap_attr_show_usage =
	__ATTR(usage, S_IRUGO, attr_show_usage, NULL);

static struct attribute *heap_extra_attrs[] = {
	&heap_attr_show_usage.attr,
	NULL,
};

static struct attribute_group heap_extra_attr_group = {
	.attrs = heap_extra_attrs,
};

static void client_stringify(struct nvmap_client *client, struct seq_file *s)
{
	char task_comm[TASK_COMM_LEN];
	if (!client->task) {
		seq_printf(s, "%8s %16s %8u", client->name, "kernel", 0);
		return;
	}
	get_task_comm(task_comm, client->task);
	seq_printf(s, "%8s %16s %8u", client->name, task_comm,
		   client->task->pid);
}

static void allocations_stringify(struct nvmap_client *client,
				  struct seq_file *s)
{
	struct rb_node *n = rb_first(&client->handle_refs);
	unsigned long long total = 0;

	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *handle = ref->handle;
		if (handle->alloc && !handle->heap_pgalloc) {
			seq_printf(s, " %8u@%8lx ", handle->size,
				   handle->carveout->base);
			total += handle->size;
		}
	}
	seq_printf(s, " total: %llu\n", total);
}

static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
{
	struct nvmap_carveout_node *node = s->private;
	struct nvmap_carveout_commit *commit;
	unsigned long flags;

	spin_lock_irqsave(&node->clients_lock, flags);
	list_for_each_entry(commit, &node->clients, list) {
		struct nvmap_client *client =
			get_client_from_carveout_commit(node, commit);
		client_stringify(client, s);
		allocations_stringify(client, s);
	}
	spin_unlock_irqrestore(&node->clients_lock, flags);

	return 0;
}

static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvmap_debug_allocations_show,
			   inode->i_private);
}

static struct file_operations debug_allocations_fops = {
	.open = nvmap_debug_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
{
	struct nvmap_carveout_node *node = s->private;
	struct nvmap_carveout_commit *commit;
	unsigned long flags;

	spin_lock_irqsave(&node->clients_lock, flags);
	list_for_each_entry(commit, &node->clients, list) {
		struct nvmap_client *client =
			get_client_from_carveout_commit(node, commit);
		client_stringify(client, s);
		seq_printf(s, " %8u\n", commit->commit);
	}
	spin_unlock_irqrestore(&node->clients_lock, flags);

	return 0;
}

static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvmap_debug_clients_show, inode->i_private);
}

static struct file_operations debug_clients_fops = {
	.open = nvmap_debug_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nvmap_probe(struct platform_device *pdev)
{
	struct nvmap_platform_data *plat = pdev->dev.platform_data;
	struct nvmap_device *dev;
	struct dentry *nvmap_debug_root;
	unsigned int i;
	int e;

	if (!plat) {
		dev_err(&pdev->dev, "no platform data?\n");
		return -ENODEV;
	}

	if (WARN_ON(nvmap_dev != NULL)) {
		dev_err(&pdev->dev, "only one nvmap device may be present\n");
		return -ENODEV;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "out of memory for device\n");
		return -ENOMEM;
	}

	dev->dev_user.minor = MISC_DYNAMIC_MINOR;
	dev->dev_user.name = "nvmap";
	dev->dev_user.fops = &nvmap_user_fops;
	dev->dev_user.parent = &pdev->dev;

	dev->dev_super.minor = MISC_DYNAMIC_MINOR;
	dev->dev_super.name = "knvmap";
	dev->dev_super.fops = &nvmap_super_fops;
	dev->dev_super.parent = &pdev->dev;

	dev->handles = RB_ROOT;

	init_waitqueue_head(&dev->pte_wait);

	init_waitqueue_head(&dev->iovmm_master.pin_wait);
	mutex_init(&dev->iovmm_master.pin_lock);
	dev->iovmm_master.iovmm =
		tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL);
	if (IS_ERR(dev->iovmm_master.iovmm)) {
		e = PTR_ERR(dev->iovmm_master.iovmm);
		dev_err(&pdev->dev, "couldn't create iovmm client\n");
		goto fail;
	}
	dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
	if (!dev->vm_rgn) {
		e = -ENOMEM;
		dev_err(&pdev->dev, "couldn't allocate remapping region\n");
		goto fail;
	}
	e = nvmap_mru_init(&dev->iovmm_master);
	if (e) {
		dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
		goto fail;
	}

	spin_lock_init(&dev->ptelock);
	spin_lock_init(&dev->handle_lock);

	for (i = 0; i < NVMAP_NUM_PTES; i++) {
		unsigned long addr;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud) {
			e = -ENOMEM;
			dev_err(&pdev->dev, "couldn't allocate page tables\n");
			goto fail;
		}
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd) {
			e = -ENOMEM;
			dev_err(&pdev->dev, "couldn't allocate page tables\n");
			goto fail;
		}
		dev->ptes[i] = pte_alloc_kernel(pmd, addr);
		if (!dev->ptes[i]) {
			e = -ENOMEM;
			dev_err(&pdev->dev, "couldn't allocate page tables\n");
			goto fail;
		}
	}

	e = misc_register(&dev->dev_user);
	if (e) {
		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
			dev->dev_user.name);
		goto fail;
	}

	e = misc_register(&dev->dev_super);
	if (e) {
		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
			dev->dev_super.name);
		goto fail;
	}

	dev->nr_carveouts = 0;
	dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
			     plat->nr_carveouts, GFP_KERNEL);
	if (!dev->heaps) {
		e = -ENOMEM;
		dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
		goto fail;
	}

	nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
	if (IS_ERR_OR_NULL(nvmap_debug_root))
		dev_err(&pdev->dev, "couldn't create debug files\n");

	for (i = 0; i < plat->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		const struct nvmap_platform_carveout *co = &plat->carveouts[i];
		node->carveout = nvmap_heap_create(dev->dev_user.this_device,
				   co->name, co->base, co->size,
				   co->buddy_size, node);
		if (!node->carveout) {
			e = -ENOMEM;
			dev_err(&pdev->dev, "couldn't create %s\n", co->name);
			goto fail_heaps;
		}
		dev->nr_carveouts++;
		spin_lock_init(&node->clients_lock);
		node->index = i;
		INIT_LIST_HEAD(&node->clients);
		node->heap_bit = co->usage_mask;
		if (nvmap_heap_create_group(node->carveout,
					    &heap_extra_attr_group))
			dev_warn(&pdev->dev, "couldn't add extra attributes\n");

		dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
			 co->name, co->size / 1024);

		if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
			struct dentry *heap_root =
				debugfs_create_dir(co->name, nvmap_debug_root);
			if (!IS_ERR_OR_NULL(heap_root)) {
				debugfs_create_file("clients", 0664, heap_root,
					node, &debug_clients_fops);
				debugfs_create_file("allocations", 0664,
					heap_root, node,
					&debug_allocations_fops);
			}
		}
	}

	platform_set_drvdata(pdev, dev);
	nvmap_dev = dev;
	return 0;
fail_heaps:
	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
		nvmap_heap_destroy(node->carveout);
	}
fail:
	kfree(dev->heaps);
	nvmap_mru_destroy(&dev->iovmm_master);
	if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&dev->dev_super);
	if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&dev->dev_user);
	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
		tegra_iovmm_free_client(dev->iovmm_master.iovmm);
	if (dev->vm_rgn)
		free_vm_area(dev->vm_rgn);
	kfree(dev);
	nvmap_dev = NULL;
	return e;
}

static int nvmap_remove(struct platform_device *pdev)
{
	struct nvmap_device *dev = platform_get_drvdata(pdev);
	struct rb_node *n;
	struct nvmap_handle *h;
	int i;

	misc_deregister(&dev->dev_super);
	misc_deregister(&dev->dev_user);

	while ((n = rb_first(&dev->handles))) {
		h = rb_entry(n, struct nvmap_handle, node);
		rb_erase(&h->node, &dev->handles);
		kfree(h);
	}

	if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
		tegra_iovmm_free_client(dev->iovmm_master.iovmm);

	nvmap_mru_destroy(&dev->iovmm_master);

	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
		nvmap_heap_destroy(node->carveout);
	}
	kfree(dev->heaps);

	free_vm_area(dev->vm_rgn);
	kfree(dev);
	nvmap_dev = NULL;
	return 0;
}

static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;
}

static int nvmap_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver nvmap_driver = {
	.probe		= nvmap_probe,
	.remove		= nvmap_remove,
	.suspend	= nvmap_suspend,
	.resume		= nvmap_resume,

	.driver = {
		.name	= "tegra-nvmap",
		.owner	= THIS_MODULE,
	},
};

static int __init nvmap_init_driver(void)
{
	int e;

	nvmap_dev = NULL;

	e = nvmap_heap_init();
	if (e)
		goto fail;

	e = platform_driver_register(&nvmap_driver);
	if (e) {
		nvmap_heap_deinit();
		goto fail;
	}

fail:
	return e;
}
fs_initcall(nvmap_init_driver);

static void __exit nvmap_exit_driver(void)
{
	platform_driver_unregister(&nvmap_driver);
	nvmap_heap_deinit();
	nvmap_dev = NULL;
}
module_exit(nvmap_exit_driver);