video: tegra: nvmap: Use nvmap.h include file from kernel/include
[linux-2.6.git] / drivers/video/tegra/nvmap/nvmap_dev.c
index 32008e2..c788187 100644
@@ -3,7 +3,7 @@
  *
  * User-space interface to nvmap
  *
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2011-2012, NVIDIA Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <asm/tlbflush.h>
 
 #include <mach/iovmm.h>
-#include <mach/nvmap.h>
+#include <linux/nvmap.h>
 
 #include "nvmap.h"
 #include "nvmap_ioctl.h"
 #include "nvmap_mru.h"
+#include "nvmap_common.h"
 
 #define NVMAP_NUM_PTES         64
 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
 
+#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
+static bool carveout_killer = true;
+#else
+static bool carveout_killer;
+#endif
+module_param(carveout_killer, bool, 0640);
+
 struct nvmap_carveout_node {
        unsigned int            heap_bit;
        struct nvmap_heap       *carveout;
        int                     index;
        struct list_head        clients;
-       spinlock_t              clients_lock;
+       spinlock_t              clients_lock;
 };
 
 struct nvmap_device {
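The carveout_killer switch introduced above is a standard module parameter (mode 0640): it defaults to on only when CONFIG_NVMAP_CARVEOUT_KILLER is set, and, assuming the driver is built as a module named nvmap, it would surface at /sys/module/nvmap/parameters/carveout_killer, where root can flip the killer on or off at runtime without rebuilding.
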
@@ -71,6 +79,8 @@ struct nvmap_device {
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
        struct nvmap_share iovmm_master;
+       struct list_head clients;
+       spinlock_t      clients_lock;
 };
 
 struct nvmap_device *nvmap_dev;
@@ -243,23 +253,55 @@ unsigned long nvmap_carveout_usage(struct nvmap_client *c,
        return 0;
 }
 
-static int nvmap_flush_heap_block(struct nvmap_client *client,
-                                 struct nvmap_heap_block *block, size_t len)
+/*
+ * This routine flushes carveout memory from the cache.
+ * Why does carveout memory need a cache flush? Consider the case where a
+ * piece of carveout is allocated as cached and later released. If the same
+ * memory is then allocated for an uncached request without being flushed
+ * from the cache, the client might pass it to a H/W engine, which could
+ * start modifying it. Because the memory was cached earlier, portions of it
+ * may still sit in the cache. When the CPU later reads or writes other
+ * memory, those stale cache lines can be evicted back to main memory and
+ * corrupt the buffer if the H/W engine has already written data there.
+ *
+ * However, blindly flushing the memory on every carveout allocation is
+ * redundant.
+ *
+ * To optimize carveout buffer cache flushes, the following strategy is used:
+ *
+ * The whole carveout is flushed from the cache during its initialization.
+ * During allocation, carveout buffers are not flushed from the cache.
+ * During deallocation, carveout buffers are flushed if they were allocated
+ * as cached; if they were allocated as uncached/writecombined, no cache
+ * flush is needed and just draining the store buffers is enough.
+ */
+int nvmap_flush_heap_block(struct nvmap_client *client,
+       struct nvmap_heap_block *block, size_t len, unsigned int prot)
 {
        pte_t **pte;
        void *addr;
-       unsigned long kaddr;
-       unsigned long phys = block->base;
-       unsigned long end = block->base + len;
+       phys_addr_t kaddr;
+       phys_addr_t phys = block->base;
+       phys_addr_t end = block->base + len;
+
+       if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
+               goto out;
+
+       if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
+               inner_flush_cache_all();
+               if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
+                       outer_flush_range(block->base, block->base + len);
+               goto out;
+       }
 
-       pte = nvmap_alloc_pte(client->dev, &addr);
+       pte = nvmap_alloc_pte((client ? client->dev : nvmap_dev), &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);
 
-       kaddr = (unsigned long)addr;
+       kaddr = (phys_addr_t)addr;
 
        while (phys < end) {
-               unsigned long next = (phys + PAGE_SIZE) & PAGE_MASK;
+               phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
                unsigned long pfn = __phys_to_pfn(phys);
                void *base = (void *)kaddr + (phys & ~PAGE_MASK);
 
@@ -270,9 +312,12 @@ static int nvmap_flush_heap_block(struct nvmap_client *client,
                phys = next;
        }
 
-       outer_flush_range(block->base, block->base + len);
+       if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
+               outer_flush_range(block->base, block->base + len);
 
-       nvmap_free_pte(client->dev, pte);
+       nvmap_free_pte((client ? client->dev : nvmap_dev), pte);
+out:
+       wmb();
        return 0;
 }
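
The policy above reduces to three cases. A minimal sketch of the decision tree, for illustration only: the NVMAP_HANDLE_* protection values, FLUSH_CLEAN_BY_SET_WAY_THRESHOLD and inner_flush_cache_all() come from this tree (presumably via the new nvmap_common.h include), and the per-page walk is elided.

    /* Illustration only: the flush policy of nvmap_flush_heap_block() */
    static void flush_policy(phys_addr_t base, size_t len, unsigned int prot)
    {
            if (prot == NVMAP_HANDLE_UNCACHEABLE ||
                prot == NVMAP_HANDLE_WRITE_COMBINE)
                    goto drain;     /* never cached: nothing to flush */

            if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
                    /* big range: cheaper to flush the whole inner cache */
                    inner_flush_cache_all();
            } else {
                    /* small range: map each page through a scratch PTE
                     * and flush it by virtual address (elided) */
            }

            if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
                    outer_flush_range(base, base + len);
    drain:
            wmb();                  /* drain store buffers in every case */
    }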
 
@@ -308,15 +353,15 @@ void nvmap_carveout_commit_subtract(struct nvmap_client *client,
                return;
 
        spin_lock_irqsave(&node->clients_lock, flags);
+       BUG_ON(client->carveout_commit[node->index].commit < len);
        client->carveout_commit[node->index].commit -= len;
-       BUG_ON(client->carveout_commit[node->index].commit < 0);
        /* if no more allocation in this carveout for this node, delete it */
        if (!client->carveout_commit[node->index].commit)
                list_del_init(&client->carveout_commit[node->index].list);
        spin_unlock_irqrestore(&node->clients_lock, flags);
 }
 
-static struct nvmap_client* get_client_from_carveout_commit(
+static struct nvmap_client *get_client_from_carveout_commit(
        struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
 {
        struct nvmap_carveout_commit *first_commit = commit - node->index;
@@ -324,7 +369,8 @@ static struct nvmap_client* get_client_from_carveout_commit(
                                               carveout_commit);
 }
 
-#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
+static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
+static int wait_count;
 bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
 {
        struct nvmap_carveout_commit *commit;
@@ -332,7 +378,13 @@ bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
        int selected_oom_adj = OOM_ADJUST_MIN;
        struct task_struct *selected_task = NULL;
        unsigned long flags;
-       bool death_pending = false;
+       bool wait = false;
+       int current_oom_adj = OOM_ADJUST_MIN;
+
+       task_lock(current);
+       if (current->signal)
+               current_oom_adj = current->signal->oom_adj;
+       task_unlock(current);
 
        spin_lock_irqsave(&node->clients_lock, flags);
        /* find the task with the largest oom_adj (lowest priority)
@@ -347,40 +399,50 @@ bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
 
                if (!task)
                        continue;
+
+               task_lock(task);
                sig = task->signal;
                if (!task->mm || !sig)
-                       continue;
+                       goto end;
+               /* don't try to kill current */
+               if (task == current->group_leader)
+                       goto end;
+               /* don't try to kill higher priority tasks */
+               if (sig->oom_adj < current_oom_adj)
+                       goto end;
                if (sig->oom_adj < selected_oom_adj)
-                       continue;
+                       goto end;
                if (sig->oom_adj == selected_oom_adj &&
                    size <= selected_size)
-                       continue;
+                       goto end;
                selected_oom_adj = sig->oom_adj;
                selected_size = size;
                selected_task = task;
+end:
+               task_unlock(task);
        }
        if (selected_task) {
-               death_pending = selected_task == current;
+               wait = true;
                if (fatal_signal_pending(selected_task)) {
                        pr_warning("carveout_killer: process %d dying "
                                   "slowly\n", selected_task->pid);
                        goto out;
                }
                pr_info("carveout_killer: killing process %d with oom_adj %d "
-                       "to reclaim %d\n", selected_task->pid, selected_oom_adj,
-                       selected_size);
+                       "to reclaim %d (for process with oom_adj %d)\n",
+                       selected_task->pid, selected_oom_adj,
+                       selected_size, current_oom_adj);
                force_sig(SIGKILL, selected_task);
        }
 out:
        spin_unlock_irqrestore(&node->clients_lock, flags);
-       return death_pending;
+       return wait;
 }
-#endif
 
+static
 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
-                                                size_t len, size_t align,
-                                                unsigned long usage,
-                                                unsigned int prot)
+                                             struct nvmap_handle *handle,
+                                             unsigned long type)
 {
        struct nvmap_carveout_node *co_heap;
        struct nvmap_device *dev = client->dev;
@@ -390,63 +452,83 @@ struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
                struct nvmap_heap_block *block;
                co_heap = &dev->heaps[i];
 
-               if (!(co_heap->heap_bit & usage))
+               if (!(co_heap->heap_bit & type))
                        continue;
 
-               block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
-               if (block) {
-                       /* flush any stale data that may be left in the
-                        * cache at the block's address, since the new
-                        * block may be mapped uncached */
-                       if (nvmap_flush_heap_block(client, block, len)) {
-                               nvmap_heap_free(block);
-                               return NULL;
-                       } else
-                               return block;
-               }
+               block = nvmap_heap_alloc(co_heap->carveout, handle);
+               if (block)
+                       return block;
        }
        return NULL;
 }
 
+static bool nvmap_carveout_freed(int count)
+{
+       smp_rmb();
+       return count != wait_count;
+}
+
 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
-                                             size_t len, size_t align,
-                                             unsigned long usage,
-                                             unsigned int prot)
+                                             struct nvmap_handle *handle,
+                                             unsigned long type)
 {
        struct nvmap_heap_block *block;
-#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
        struct nvmap_carveout_node *co_heap;
        struct nvmap_device *dev = client->dev;
        int i;
        unsigned long end = jiffies +
                msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
+       int count = 0;
 
        do {
-               block = do_nvmap_carveout_alloc(client, len, align, usage,
-                                               prot);
+               block = do_nvmap_carveout_alloc(client, handle, type);
+               if (!carveout_killer)
+                       return block;
+
                if (block)
                        return block;
 
+               if (!count++) {
+                       char task_comm[TASK_COMM_LEN];
+                       if (client->task)
+                               get_task_comm(task_comm, client->task);
+                       else
+                               task_comm[0] = 0;
+                       pr_info("%s: failed to allocate %u bytes for "
+                               "process %s, firing carveout "
+                               "killer!\n", __func__, handle->size, task_comm);
+
+               } else {
+                       pr_info("%s: still can't allocate %u bytes, "
+                               "attempt %d!\n", __func__, handle->size, count);
+               }
+
                /* shrink carveouts that matter and try again */
                for (i = 0; i < dev->nr_carveouts; i++) {
+                       int count;
                        co_heap = &dev->heaps[i];
 
-                       if (!(co_heap->heap_bit & usage))
+                       if (!(co_heap->heap_bit & type))
                                continue;
 
-                       /* indicates we just delivered a sigkill to current,
-                          might as well stop trying so the process can exit */
-                       if (nvmap_shrink_carveout(co_heap))
+                       count = wait_count;
+                       /* indicates we didn't find anything to kill,
+                          might as well stop trying */
+                       if (!nvmap_shrink_carveout(co_heap))
                                return NULL;
+
+                       if (time_is_after_jiffies(end))
+                               wait_event_interruptible_timeout(wait_reclaim,
+                                        nvmap_carveout_freed(count),
+                                        end - jiffies);
                }
-               yield();
        } while (time_is_after_jiffies(end));
 
+       if (time_is_before_jiffies(end))
+               pr_info("carveout_killer: timeout expired without "
+                       "allocation succeeding.\n");
+
        return NULL;
-#else
-       block = do_nvmap_carveout_alloc(client, len, align, usage, prot);
-       return block;
-#endif
 }
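
The loop above cooperates with destroy_client() through a simple counter handshake: snapshot wait_count, fire the killer, then sleep on wait_reclaim until the snapshot goes stale, which means some client actually released memory. Condensed from the two sides of this patch:

    /* allocator side (nvmap_carveout_alloc) */
    int snapshot = wait_count;
    if (!nvmap_shrink_carveout(co_heap))
            return NULL;                    /* nothing left worth killing */
    wait_event_interruptible_timeout(wait_reclaim,
            nvmap_carveout_freed(snapshot), /* smp_rmb(); snapshot != wait_count */
            end - jiffies);

    /* teardown side (destroy_client) */
    wait_count++;
    smp_wmb();
    wake_up_all(&wait_reclaim);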
 
 /* remove a handle from the device's tree of all handles; called
@@ -556,22 +638,25 @@ struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
                client->carveout_commit[i].commit = 0;
        }
 
-       get_task_struct(current);
-       task_lock(current);
+       get_task_struct(current->group_leader);
+       task_lock(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->flags & PF_KTHREAD) {
-               put_task_struct(current);
+               put_task_struct(current->group_leader);
                task = NULL;
        } else {
-               task = current;
+               task = current->group_leader;
        }
-       task_unlock(current);
+       task_unlock(current->group_leader);
        client->task = task;
 
-       spin_lock_init(&client->ref_lock);
+       mutex_init(&client->ref_lock);
        atomic_set(&client->count, 1);
 
+       spin_lock(&dev->clients_lock);
+       list_add(&client->list, &dev->clients);
+       spin_unlock(&dev->clients_lock);
        return client;
 }
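
Note why the client now pins current->group_leader instead of current: the stored task is both what the carveout killer compares against (task == current->group_leader) and the recipient of its SIGKILL, and the group leader's task_struct stays valid for the life of the whole process even if the individual thread that created the client exits first.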
 
@@ -583,6 +668,7 @@ static void destroy_client(struct nvmap_client *client)
        if (!client)
                return;
 
+
        while ((n = rb_first(&client->handle_refs))) {
                struct nvmap_handle_ref *ref;
                int pins, dupes;
@@ -593,10 +679,8 @@ static void destroy_client(struct nvmap_client *client)
                smp_rmb();
                pins = atomic_read(&ref->pin);
 
-               mutex_lock(&ref->handle->lock);
                if (ref->handle->owner == client)
-                   ref->handle->owner = NULL;
-               mutex_unlock(&ref->handle->lock);
+                       ref->handle->owner = NULL;
 
                while (pins--)
                        nvmap_unpin_handles(client, &ref->handle, 1);
@@ -608,12 +692,21 @@ static void destroy_client(struct nvmap_client *client)
                kfree(ref);
        }
 
+       if (carveout_killer) {
+               wait_count++;
+               smp_wmb();
+               wake_up_all(&wait_reclaim);
+       }
+
        for (i = 0; i < client->dev->nr_carveouts; i++)
                list_del(&client->carveout_commit[i].list);
 
        if (client->task)
                put_task_struct(client->task);
 
+       spin_lock(&client->dev->clients_lock);
+       list_del(&client->list);
+       spin_unlock(&client->dev->clients_lock);
        kfree(client);
 }
 
@@ -798,12 +891,17 @@ static void nvmap_vma_close(struct vm_area_struct *vma)
 {
        struct nvmap_vma_priv *priv = vma->vm_private_data;
 
-       if (priv && !atomic_dec_return(&priv->count)) {
-               if (priv->handle)
-                       nvmap_handle_put(priv->handle);
-               kfree(priv);
+       if (priv) {
+               if (priv->handle) {
+                       nvmap_usecount_dec(priv->handle);
+                       BUG_ON(priv->handle->usecount < 0);
+               }
+               if (!atomic_dec_return(&priv->count)) {
+                       if (priv->handle)
+                               nvmap_handle_put(priv->handle);
+                       kfree(priv);
+               }
        }
-
        vma->vm_private_data = NULL;
 }
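
The reworked close path keeps two counters distinct: the handle's usecount, dropped via nvmap_usecount_dec() on every close (and presumably raised when the mapping is set up), tracks live mappings of the buffer itself, while priv->count tracks how many VMAs share the private data; only when the last sharing VMA goes away is the handle reference dropped and priv freed.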
 
@@ -866,31 +964,33 @@ static void client_stringify(struct nvmap_client *client, struct seq_file *s)
 {
        char task_comm[TASK_COMM_LEN];
        if (!client->task) {
-               seq_printf(s, "%8s %16s %8u", client->name, "kernel", 0);
+               seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
                return;
        }
        get_task_comm(task_comm, client->task);
-       seq_printf(s, "%8s %16s %8u", client->name, task_comm,
+       seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
                   client->task->pid);
 }
 
 static void allocations_stringify(struct nvmap_client *client,
                                  struct seq_file *s)
 {
+       unsigned long base = 0;
        struct rb_node *n = rb_first(&client->handle_refs);
-       unsigned long long total = 0;
 
        for (; n != NULL; n = rb_next(n)) {
                struct nvmap_handle_ref *ref =
                        rb_entry(n, struct nvmap_handle_ref, node);
                struct nvmap_handle *handle = ref->handle;
                if (handle->alloc && !handle->heap_pgalloc) {
-                       seq_printf(s, " %8u@%8lx ", handle->size,
-                                  handle->carveout->base);
-                       total += handle->size;
+                       seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "",
+                                       (unsigned long)(handle->carveout->base),
+                                       handle->size, handle->userflags);
+               } else if (handle->alloc && handle->heap_pgalloc) {
+                       seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "",
+                                       base, handle->size, handle->userflags);
                }
        }
-       seq_printf(s, " total: %llu\n", total);
 }
 
 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
@@ -898,14 +998,23 @@ static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
        struct nvmap_carveout_node *node = s->private;
        struct nvmap_carveout_commit *commit;
        unsigned long flags;
+       unsigned int total = 0;
 
        spin_lock_irqsave(&node->clients_lock, flags);
+       seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
+               "SIZE", "FLAGS");
+       seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
+                                       "BASE", "SIZE");
        list_for_each_entry(commit, &node->clients, list) {
                struct nvmap_client *client =
                        get_client_from_carveout_commit(node, commit);
                client_stringify(client, s);
+               seq_printf(s, " %10u\n", commit->commit);
                allocations_stringify(client, s);
+               seq_printf(s, "\n");
+               total += commit->commit;
        }
+       seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
        spin_unlock_irqrestore(&node->clients_lock, flags);
 
        return 0;
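
With the new headers, the per-heap allocations file interleaves one summary row per client with indented per-handle rows (BASE, SIZE, FLAGS), followed by a grand total. A hypothetical reading, with invented values and approximate column widths, might look like:

    CLIENT                        PROCESS      PID       SIZE    FLAGS
                                              BASE       SIZE
    user                   surfaceflinger     1234     917504
                                          1e100000     524288        1
                                          1e180000     393216        1

    total                                        0     917504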
@@ -917,7 +1026,7 @@ static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
                           inode->i_private);
 }
 
-static struct file_operations debug_allocations_fops = {
+static const struct file_operations debug_allocations_fops = {
        .open = nvmap_debug_allocations_open,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -929,14 +1038,19 @@ static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
        struct nvmap_carveout_node *node = s->private;
        struct nvmap_carveout_commit *commit;
        unsigned long flags;
+       unsigned int total = 0;
 
        spin_lock_irqsave(&node->clients_lock, flags);
+       seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+               "SIZE");
        list_for_each_entry(commit, &node->clients, list) {
                struct nvmap_client *client =
                        get_client_from_carveout_commit(node, commit);
                client_stringify(client, s);
-               seq_printf(s, " %8u\n", commit->commit);
+               seq_printf(s, " %10u\n", commit->commit);
+               total += commit->commit;
        }
+       seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
        spin_unlock_irqrestore(&node->clients_lock, flags);
 
        return 0;
@@ -947,13 +1061,87 @@ static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
        return single_open(file, nvmap_debug_clients_show, inode->i_private);
 }
 
-static struct file_operations debug_clients_fops = {
+static const struct file_operations debug_clients_fops = {
        .open = nvmap_debug_clients_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
 };
 
+static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
+{
+       unsigned long flags;
+       unsigned int total = 0;
+       struct nvmap_client *client;
+       struct nvmap_device *dev = s->private;
+
+       spin_lock_irqsave(&dev->clients_lock, flags);
+       seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+               "SIZE");
+       list_for_each_entry(client, &dev->clients, list) {
+               client_stringify(client, s);
+               seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
+               total += atomic_read(&client->iovm_commit);
+       }
+       seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
+       spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+       return 0;
+}
+
+static int nvmap_debug_iovmm_clients_open(struct inode *inode,
+                                           struct file *file)
+{
+       return single_open(file, nvmap_debug_iovmm_clients_show,
+                           inode->i_private);
+}
+
+static const struct file_operations debug_iovmm_clients_fops = {
+       .open = nvmap_debug_iovmm_clients_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
+{
+       unsigned long flags;
+       unsigned int total = 0;
+       struct nvmap_client *client;
+       struct nvmap_device *dev = s->private;
+
+       spin_lock_irqsave(&dev->clients_lock, flags);
+       seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
+               "SIZE");
+       seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
+                                       "BASE", "SIZE");
+       list_for_each_entry(client, &dev->clients, list) {
+               client_stringify(client, s);
+               seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
+               allocations_stringify(client, s);
+               seq_printf(s, "\n");
+               total += atomic_read(&client->iovm_commit);
+       }
+       seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
+       spin_unlock_irqrestore(&dev->clients_lock, flags);
+
+       return 0;
+}
+
+static int nvmap_debug_iovmm_allocations_open(struct inode *inode,
+                                               struct file *file)
+{
+       return single_open(file, nvmap_debug_iovmm_allocations_show,
+                           inode->i_private);
+}
+
+static const struct file_operations debug_iovmm_allocations_fops = {
+       .open = nvmap_debug_iovmm_allocations_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 static int nvmap_probe(struct platform_device *pdev)
 {
        struct nvmap_platform_data *plat = pdev->dev.platform_data;
@@ -994,13 +1182,21 @@ static int nvmap_probe(struct platform_device *pdev)
 
        init_waitqueue_head(&dev->iovmm_master.pin_wait);
        mutex_init(&dev->iovmm_master.pin_lock);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+       for (i = 0; i < NVMAP_NUM_POOLS; i++)
+               nvmap_page_pool_init(&dev->iovmm_master.pools[i], i);
+#endif
+
        dev->iovmm_master.iovmm =
-               tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL);
-       if (IS_ERR(dev->iovmm_master.iovmm)) {
+               tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL,
+                       &(dev->dev_user));
+#ifdef CONFIG_TEGRA_IOVMM
+       if (!dev->iovmm_master.iovmm) {
-               e = PTR_ERR(dev->iovmm_master.iovmm);
+               e = -ENOMEM;
                dev_err(&pdev->dev, "couldn't create iovmm client\n");
                goto fail;
        }
+#endif
        dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
        if (!dev->vm_rgn) {
                e = -ENOMEM;
@@ -1015,6 +1211,8 @@ static int nvmap_probe(struct platform_device *pdev)
 
        spin_lock_init(&dev->ptelock);
        spin_lock_init(&dev->handle_lock);
+       INIT_LIST_HEAD(&dev->clients);
+       spin_lock_init(&dev->clients_lock);
 
        for (i = 0; i < NVMAP_NUM_PTES; i++) {
                unsigned long addr;
@@ -1072,8 +1270,10 @@ static int nvmap_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "couldn't create debug files\n");
 
        for (i = 0; i < plat->nr_carveouts; i++) {
-               struct nvmap_carveout_node *node = &dev->heaps[i];
+               struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
                const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+               if (!co->size)
+                       continue;
                node->carveout = nvmap_heap_create(dev->dev_user.this_device,
                                   co->name, co->base, co->size,
                                   co->buddy_size, node);
@@ -1082,9 +1282,9 @@ static int nvmap_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "couldn't create %s\n", co->name);
                        goto fail_heaps;
                }
+               node->index = dev->nr_carveouts;
                dev->nr_carveouts++;
                spin_lock_init(&node->clients_lock);
-               node->index = i;
                INIT_LIST_HEAD(&node->clients);
                node->heap_bit = co->usage_mask;
                if (nvmap_heap_create_group(node->carveout,
@@ -1105,9 +1305,32 @@ static int nvmap_probe(struct platform_device *pdev)
                        }
                }
        }
+       if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+               struct dentry *iovmm_root =
+                       debugfs_create_dir("iovmm", nvmap_debug_root);
+               if (!IS_ERR_OR_NULL(iovmm_root)) {
+                       debugfs_create_file("clients", 0664, iovmm_root,
+                               dev, &debug_iovmm_clients_fops);
+                       debugfs_create_file("allocations", 0664, iovmm_root,
+                               dev, &debug_iovmm_allocations_fops);
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+                       for (i = 0; i < NVMAP_NUM_POOLS; i++) {
+                               char name[40];
+                               char *memtype_string[] = {"uc", "wc",
+                                                         "iwb", "wb"};
+                               sprintf(name, "%s_page_pool_available_pages",
+                                       memtype_string[i]);
+                               debugfs_create_u32(name, S_IRUGO|S_IWUSR,
+                                       iovmm_root,
+                                       &dev->iovmm_master.pools[i].npages);
+                       }
+#endif
+               }
+       }
 
        platform_set_drvdata(pdev, dev);
        nvmap_dev = dev;
+
        return 0;
 fail_heaps:
        for (i = 0; i < dev->nr_carveouts; i++) {
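
With the memtype_string[] table above, the four debugfs counters land in the new iovmm directory as uc_page_pool_available_pages, wc_page_pool_available_pages, iwb_page_pool_available_pages and wb_page_pool_available_pages (for example /sys/kernel/debug/nvmap/iovmm/wb_page_pool_available_pages, assuming the top-level nvmap_debug_root directory is named "nvmap").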