gpu: ion: Add IOMMU heap allocator with IOMMU API
Hiroshi DOYU [Fri, 23 Dec 2011 12:10:33 +0000 (14:10 +0200)]
Implement an IOMMU heap allocator ("ion_iommu_heap_ops") on top of the
IOMMU API. The implementation is independent of the SoC because the
standard IOMMU API is used in the backend of this heap_ops.

[Krishna Reddy: Heavily refactored the original version, in particular
making the "allocation" and "mapping" steps independent of each other
via "struct page" based mapping functions.]

Change-Id: Ia692d9a0dc00424fa3e95a830f557808351b1ad7
Signed-off-by: Hiroshi DOYU <hdoyu@nvidia.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/72217
Reviewed-by: Automatic_Commit_Validation_User

drivers/gpu/ion/Kconfig
drivers/gpu/ion/Makefile
drivers/gpu/ion/ion_heap.c
drivers/gpu/ion/ion_iommu_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_priv.h
include/linux/ion.h

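Usage sketch (hedged: the ion_device pointer, heap id and the omitted error
handling are assumptions for illustration). An in-kernel client allocates
from this heap through the ION client calls of this era; the address
reported by ion_phys() is the IOVA mapped into the heap's IOMMU domain, and
map_kernel returns an uncached CPU mapping of the same pages.

	struct ion_client *client;
	struct ion_handle *handle;
	ion_phys_addr_t iova;
	size_t len;
	void *cpu_addr;

	/* "idev" and EXAMPLE_IOMMU_HEAP_ID are hypothetical */
	client = ion_client_create(idev, 1 << EXAMPLE_IOMMU_HEAP_ID, "example");
	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
			   1 << EXAMPLE_IOMMU_HEAP_ID);

	ion_phys(client, handle, &iova, &len);		/* device-visible IOVA */
	cpu_addr = ion_map_kernel(client, handle);	/* uncached CPU mapping */

	/* ... hand "iova" to the device, touch the buffer via cpu_addr ... */

	ion_unmap_kernel(client, handle);
	ion_free(client, handle);
	ion_client_destroy(client);
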
index 5b48b4e..9a8cbdd 100644 (file)
@@ -4,9 +4,14 @@ menuconfig ION
        help
          Choose this option to enable the ION Memory Manager.
 
+config ION_IOMMU
+       bool
+
 config ION_TEGRA
        tristate "Ion for Tegra"
        depends on ARCH_TEGRA && ION
+       select TEGRA_IOMMU_SMMU if !ARCH_TEGRA_2x_SOC
+       select ION_IOMMU if TEGRA_IOMMU_SMMU
        help
          Choose this option if you wish to use ion on an nVidia Tegra.
 
index 73fe3fa..4ddc78e 100644 (file)
@@ -1,2 +1,3 @@
 obj-$(CONFIG_ION) +=   ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION_IOMMU)        += ion_iommu_heap.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
index 8ce3c19..6d09778 100644 (file)
@@ -32,6 +32,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
+       case ION_HEAP_TYPE_IOMMU:
+               heap = ion_iommu_heap_create(heap_data);
+               break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
@@ -65,6 +68,9 @@ void ion_heap_destroy(struct ion_heap *heap)
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
+       case ION_HEAP_TYPE_IOMMU:
+               ion_iommu_heap_destroy(heap);
+               break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
new file mode 100644 (file)
index 0000000..f0246cb
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+ * drivers/gpu/ion/ion_iommu_heap.c
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define pr_fmt(fmt)    "%s(): " fmt, __func__
+
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/tegra_ion.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/highmem.h>
+
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include "ion_priv.h"
+
+#define NUM_PAGES(buf) (PAGE_ALIGN((buf)->size) >> PAGE_SHIFT)
+
+#define GFP_ION                (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+
+struct ion_iommu_heap {
+       struct ion_heap         heap;
+       struct gen_pool         *pool;
+       struct iommu_domain     *domain;
+       struct device           *dev;
+};
+
+static struct scatterlist *iommu_heap_map_dma(struct ion_heap *heap,
+                                             struct ion_buffer *buf)
+{
+       struct ion_iommu_heap *h =
+               container_of(heap, struct ion_iommu_heap, heap);
+       int err, npages = NUM_PAGES(buf);
+       unsigned int i;
+       struct scatterlist *sg;
+       unsigned long da = (unsigned long)buf->priv_virt;
+
+       for_each_sg(buf->sglist, sg, npages, i) {
+               phys_addr_t pa;
+
+               pa = sg_phys(sg);
+               BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+               err = iommu_map(h->domain, da, pa, 0, 0);
+               if (err)
+                       goto err_out;
+
+               sg->dma_address = da;
+               da += PAGE_SIZE;
+       }
+
+       pr_debug("da:%p pa:%08x va:%p\n",
+                buf->priv_virt, sg_phys(buf->sglist), buf->vaddr);
+
+       return buf->sglist;
+
+err_out:
+       if (i > 0) {
+               unsigned int j;
+               for_each_sg(buf->sglist, sg, i, j)
+                       iommu_unmap(h->domain, sg_dma_address(sg), 0);
+       }
+       return ERR_PTR(err);
+}
+
+static void iommu_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buf)
+{
+       struct ion_iommu_heap *h =
+               container_of(heap, struct ion_iommu_heap, heap);
+       unsigned int i;
+       struct scatterlist *sg;
+       int npages = NUM_PAGES(buf);
+
+       for_each_sg(buf->sglist, sg, npages, i)
+               iommu_unmap(h->domain, sg_dma_address(sg), 0);
+
+       pr_debug("da:%p\n", buf->priv_virt);
+}
+
+
+static int ion_buffer_allocate(struct ion_buffer *buf)
+{
+       int i, npages = NUM_PAGES(buf);
+
+       buf->pages = kmalloc(npages * sizeof(*buf->pages), GFP_KERNEL);
+       if (!buf->pages)
+               goto err_pages;
+
+       buf->sglist = vmalloc(npages * sizeof(*buf->sglist));
+       if (!buf->sglist)
+               goto err_sgl;
+
+       memset(buf->sglist, 0, npages * sizeof(*buf->sglist));
+       sg_init_table(buf->sglist, npages);
+
+       for (i = 0; i < npages; i++) {
+               struct page *page;
+               phys_addr_t pa;
+
+               page = alloc_page(GFP_ION);
+               if (!page)
+                       goto err_pgalloc;
+               pa = page_to_phys(page);
+
+               sg_set_page(&buf->sglist[i], page, PAGE_SIZE, 0);
+
+               flush_dcache_page(page);
+               outer_flush_range(pa, pa + PAGE_SIZE);
+
+               buf->pages[i] = page;
+
+               pr_debug_once("pa:%08x\n", pa);
+       }
+       return 0;
+
+err_pgalloc:
+       while (i-- > 0)
+               __free_page(buf->pages[i]);
+       vfree(buf->sglist);
+err_sgl:
+       kfree(buf->pages);
+err_pages:
+       return -ENOMEM;
+}
+
+static void ion_buffer_free(struct ion_buffer *buf)
+{
+       int i, npages = NUM_PAGES(buf);
+
+       for (i = 0; i < npages; i++)
+               __free_page(buf->pages[i]);
+       vfree(buf->sglist);
+       kfree(buf->pages);
+}
+
+static int iommu_heap_allocate(struct ion_heap *heap, struct ion_buffer *buf,
+                              unsigned long len, unsigned long align,
+                              unsigned long flags)
+{
+       int err;
+       struct ion_iommu_heap *h =
+               container_of(heap, struct ion_iommu_heap, heap);
+       unsigned long da;
+       struct scatterlist *sgl;
+
+       len = round_up(len, PAGE_SIZE);
+
+       da = gen_pool_alloc(h->pool, len);
+       if (!da) {
+               buf->priv_virt = (void *)ION_CARVEOUT_ALLOCATE_FAIL;
+               return -ENOMEM;
+       }
+       buf->priv_virt = (void *)da;
+       buf->size = len;
+
+       WARN_ON(!IS_ALIGNED(da, PAGE_SIZE));
+
+       err = ion_buffer_allocate(buf);
+       if (err)
+               goto err_alloc_buf;
+
+       sgl = iommu_heap_map_dma(heap, buf);
+       if (IS_ERR_OR_NULL(sgl)) {
+               err = IS_ERR(sgl) ? PTR_ERR(sgl) : -ENOMEM;
+               goto err_heap_map_dma;
+       }
+       buf->vaddr = 0;
+       return 0;
+
+err_heap_map_dma:
+       ion_buffer_free(buf);
+err_alloc_buf:
+       gen_pool_free(h->pool, da, len);
+       buf->size = 0;
+       buf->pages = NULL;
+       buf->priv_virt = NULL;
+       return err;
+}
+
+static void iommu_heap_free(struct ion_buffer *buf)
+{
+       struct ion_heap *heap = buf->heap;
+       struct ion_iommu_heap *h =
+               container_of(heap, struct ion_iommu_heap, heap);
+       void *da = buf->priv_virt;
+
+       /*
+        * FIXME:
+        * Buf should not be in use.
+        * Forcibly remove iommu mappings, if any exists.
+        * Free physical pages here.
+        */
+
+       if (da == (void *)ION_CARVEOUT_ALLOCATE_FAIL)
+               return;
+
+       iommu_heap_unmap_dma(heap, buf);
+       ion_buffer_free(buf);
+       gen_pool_free(h->pool, (unsigned long)da, buf->size);
+
+       buf->pages = NULL;
+       buf->priv_virt = NULL;
+       pr_debug("da:%p\n", da);
+}
+
+static int iommu_heap_phys(struct ion_heap *heap, struct ion_buffer *buf,
+                          ion_phys_addr_t *addr, size_t *len)
+{
+       *addr = (unsigned long)buf->priv_virt;
+       *len = buf->size;
+       pr_debug("da:%08lx(%x)\n", *addr, *len);
+       return 0;
+}
+
+static void *iommu_heap_map_kernel(struct ion_heap *heap,
+                                  struct ion_buffer *buf)
+{
+       int npages = NUM_PAGES(buf);
+
+       BUG_ON(!buf->pages);
+       buf->vaddr = vm_map_ram(buf->pages, npages, -1,
+                               pgprot_noncached(pgprot_kernel));
+       pr_debug("va:%p\n", buf->vaddr);
+       WARN_ON(!buf->vaddr);
+       return buf->vaddr;
+}
+
+static void iommu_heap_unmap_kernel(struct ion_heap *heap,
+                                   struct ion_buffer *buf)
+{
+       int npages = NUM_PAGES(buf);
+
+       BUG_ON(!buf->pages);
+       WARN_ON(!buf->vaddr);
+       pr_debug("va:%p\n", buf->vaddr);
+       vm_unmap_ram(buf->vaddr, npages);
+       buf->vaddr = NULL;
+}
+
+static int iommu_heap_map_user(struct ion_heap *mapper,
+                              struct ion_buffer *buf,
+                              struct vm_area_struct *vma)
+{
+       int i = vma->vm_pgoff;
+       unsigned long uaddr = vma->vm_start;
+       unsigned long usize = vma->vm_end - vma->vm_start;
+
+       pr_debug("vma:%08lx-%08lx\n", vma->vm_start, vma->vm_end);
+       BUG_ON(!buf->pages);
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       do {
+               int ret;
+               struct page *page = buf->pages[i++];
+
+               ret = vm_insert_page(vma, uaddr, page);
+               if (ret)
+                       return ret;
+
+               uaddr += PAGE_SIZE;
+               usize -= PAGE_SIZE;
+       } while (usize > 0);
+
+       return 0;
+}
+
+static struct ion_heap_ops iommu_heap_ops = {
+       .allocate       = iommu_heap_allocate,
+       .free           = iommu_heap_free,
+       .phys           = iommu_heap_phys,
+       .map_dma        = iommu_heap_map_dma,
+       .unmap_dma      = iommu_heap_unmap_dma,
+       .map_kernel     = iommu_heap_map_kernel,
+       .unmap_kernel   = iommu_heap_unmap_kernel,
+       .map_user       = iommu_heap_map_user,
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data)
+{
+       struct ion_iommu_heap *h;
+       int err;
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h) {
+               err = -ENOMEM;
+               goto err_heap;
+       }
+
+       h->pool = gen_pool_create(PAGE_SHIFT, -1);
+       if (!h->pool) {
+               err = -ENOMEM;
+               goto err_genpool;
+       }
+       gen_pool_add(h->pool, data->base, data->size, -1);
+
+       h->heap.ops = &iommu_heap_ops;
+       h->domain = iommu_domain_alloc();
+       h->dev = data->priv;
+       if (!h->domain) {
+               err = -ENOMEM;
+               goto err_iommu_alloc;
+       }
+
+       err = iommu_attach_device(h->domain, h->dev);
+       if (err)
+               goto err_iommu_attach;
+
+       return &h->heap;
+
+err_iommu_attach:
+       iommu_domain_free(h->domain);
+err_iommu_alloc:
+       gen_pool_destroy(h->pool);
+err_genpool:
+       kfree(h);
+err_heap:
+       return ERR_PTR(err);
+}
+
+void ion_iommu_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_iommu_heap *h =
+               container_of(heap, struct ion_iommu_heap, heap);
+
+       iommu_detach_device(h->domain, h->dev);
+       gen_pool_destroy(h->pool);
+       iommu_domain_free(h->domain);
+       kfree(h);
+}
index 8c75ff5..c8415b8 100644 (file)
@@ -145,7 +145,8 @@ void ion_handle_add(struct ion_client *client, struct ion_handle *handle);
  * @vaddr:             the kernel mapping if kmap_cnt is not zero
  * @dmap_cnt:          number of times the buffer is mapped for dma
  * @sglist:            the scatterlist for the buffer if dmap_cnt is not zero
-*/
+ * @pages:             list for allocated pages for the buffer
+ */
 struct ion_buffer {
        struct kref ref;
        struct rb_node node;
@@ -162,6 +163,7 @@ struct ion_buffer {
        void *vaddr;
        int dmap_cnt;
        struct scatterlist *sglist;
+       struct page **pages;
 };
 
 /**
@@ -266,6 +268,18 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
                                      unsigned long align);
 void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size);
+#ifdef CONFIG_ION_IOMMU
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+#else
+static inline struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap)
+{
+       return NULL;
+}
+
+static inline void ion_iommu_heap_destroy(struct ion_heap *heap)
+{
+}
+#endif
 /**
  * The carveout heap returns physical addresses, since 0 may be a valid
  * physical address, this is used to indicate allocation failed
index aed8349..9a32243 100644 (file)
@@ -33,6 +33,7 @@ enum ion_heap_type {
        ION_HEAP_TYPE_SYSTEM,
        ION_HEAP_TYPE_SYSTEM_CONTIG,
        ION_HEAP_TYPE_CARVEOUT,
+       ION_HEAP_TYPE_IOMMU,
        ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
                                 are at the end of this enum */
        ION_NUM_HEAPS,
@@ -63,6 +64,7 @@ struct ion_buffer;
  * @name:      used for debug purposes
  * @base:      base address of heap in physical memory if applicable
  * @size:      size of the heap in bytes if applicable
+ * @priv:      heap specific data
  *
  * Provided by the board file.
  */
@@ -72,6 +74,7 @@ struct ion_platform_heap {
        const char *name;
        ion_phys_addr_t base;
        size_t size;
+       void *priv;
 };
 
 /**