video: tegra: nvmap: Add support for nvmap_kmap
Krishna Reddy [Wed, 17 Oct 2012 08:52:49 +0000 (11:52 +0300)]
Add support for mapping a single page of a buffer into the kernel address
space; a hypothetical caller sketch follows the file list below.

Bug 1158533

Change-Id: Ie331e787663d98b644aa2e7f220020982f15fd9d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/145503
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Ken Adams <kadams@nvidia.com>
Reviewed-by: Juha Tukkinen <jtukkinen@nvidia.com>

drivers/video/tegra/nvmap/nvmap.c
drivers/video/tegra/nvmap/nvmap.h
drivers/video/tegra/nvmap/nvmap_dev.c
drivers/video/tegra/nvmap/nvmap_dmabuf.c
include/linux/nvmap.h
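
For context, a minimal caller sketch (not part of the patch). It assumes a
handle ref that has already been allocated elsewhere; the function name
example_touch_first_page is hypothetical. nvmap_kmap() takes its own
reference on the handle, so the kmap/kunmap pairing below is all the
lifetime management the caller needs for the mapped page.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/nvmap.h>

/* Hypothetical caller: map page 0 of an allocated buffer, zero it,
 * and unmap. nvmap_kmap() returns NULL if no PTE slot is free. */
static int example_touch_first_page(struct nvmap_handle_ref *ref)
{
	void *va = nvmap_kmap(ref, 0);

	if (!va)
		return -ENOMEM;

	memset(va, 0, PAGE_SIZE);

	/* Flushes caches for cacheable handles, releases the PTE slot
	 * and drops the reference taken by nvmap_kmap(). */
	nvmap_kunmap(ref, 0, va);
	return 0;
}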

diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
index c78aad4..f088553 100644
@@ -446,6 +446,65 @@ void nvmap_unpin_handles(struct nvmap_client *client,
                wake_up(&client->share->pin_wait);
 }
 
+void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
+{
+       struct nvmap_handle *h;
+       unsigned long paddr;
+       unsigned long kaddr;
+       pgprot_t prot;
+       pte_t **pte;
+
+       BUG_ON(!ref);
+       h = nvmap_handle_get(ref->handle);
+       if (!h)
+               return NULL;
+
+       BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
+       prot = nvmap_pgprot(h, pgprot_kernel);
+       pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
+       if (!pte)
+               goto out;
+
+       if (h->heap_pgalloc)
+               paddr = page_to_phys(h->pgalloc.pages[pagenum]);
+       else
+               paddr = h->carveout->base + pagenum * PAGE_SIZE;
+
+       set_pte_at(&init_mm, kaddr, *pte,
+                               pfn_pte(__phys_to_pfn(paddr), prot));
+       flush_tlb_kernel_page(kaddr);
+       return (void *)kaddr;
+out:
+       nvmap_handle_put(ref->handle);
+       return NULL;
+}
+
+void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
+                 void *addr)
+{
+       struct nvmap_handle *h;
+       unsigned long paddr;
+       pte_t **pte;
+
+       BUG_ON(!addr || !ref);
+       h = ref->handle;
+
+       if (h->heap_pgalloc)
+               paddr = page_to_phys(h->pgalloc.pages[pagenum]);
+       else
+               paddr = h->carveout->base + pagenum * PAGE_SIZE;
+
+       if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
+           h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
+               dmac_flush_range(addr, addr + PAGE_SIZE);
+               outer_flush_range(paddr, paddr + PAGE_SIZE);
+       }
+
+       pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
+       nvmap_free_pte(nvmap_dev, pte);
+       nvmap_handle_put(h);
+}
+
 void *nvmap_mmap(struct nvmap_handle_ref *ref)
 {
        struct nvmap_handle *h;
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
index a852ce3..dda58c8 100644
@@ -216,6 +216,8 @@ pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
 
 void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
 
+pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);
+
 void nvmap_usecount_inc(struct nvmap_handle *h);
 void nvmap_usecount_dec(struct nvmap_handle *h);
 
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index 78b01e0..39b7bd3 100644
@@ -205,6 +205,17 @@ void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
        wake_up(&dev->pte_wait);
 }
 
+/* get pte for the virtual address */
+pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr)
+{
+       unsigned int bit;
+
+       BUG_ON(vaddr < (unsigned long)dev->vm_rgn->addr);
+       bit = (vaddr - (unsigned long)dev->vm_rgn->addr) >> PAGE_SHIFT;
+       BUG_ON(bit >= NVMAP_NUM_PTES);
+       return &(dev->ptes[bit]);
+}
+
 /* verifies that the handle ref value "ref" is a valid handle ref for the
  * file. caller must hold the file's ref_lock prior to calling this function */
 struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
diff --git a/drivers/video/tegra/nvmap/nvmap_dmabuf.c b/drivers/video/tegra/nvmap/nvmap_dmabuf.c
index fcfb718..ba101b2 100644
@@ -134,15 +134,18 @@ static void nvmap_dmabuf_release(struct dma_buf *dmabuf)
 static void *nvmap_dmabuf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
 {
        struct nvmap_handle_info *info = dmabuf->priv;
-       unsigned long allpages = PAGE_ALIGN(dmabuf->size) >> PAGE_SHIFT;
 
-       if (page_num != allpages) {
-               pr_err("%s() doesn't support partial kmap\n", __func__);
-               return NULL;
-       }
+       pr_debug("%s(%08x)\n", __func__, info->id);
+       return nvmap_kmap(info->ref, page_num);
+}
+
+static void nvmap_dmabuf_kunmap(struct dma_buf *dmabuf,
+               unsigned long page_num, void *addr)
+{
+       struct nvmap_handle_info *info = dmabuf->priv;
 
        pr_debug("%s(%08x)\n", __func__, info->id);
-       return nvmap_mmap(info->ref);
+       nvmap_kunmap(info->ref, page_num, addr);
 }
 
 static void *nvmap_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
@@ -182,6 +185,7 @@ static struct dma_buf_ops nvmap_dma_buf_ops = {
        .release        = nvmap_dmabuf_release,
        .kmap_atomic    = nvmap_dmabuf_kmap_atomic,
        .kmap           = nvmap_dmabuf_kmap,
+       .kunmap         = nvmap_dmabuf_kunmap,
        .mmap           = nvmap_dmabuf_mmap,
        .vmap           = nvmap_dmabuf_vmap,
        .vunmap         = nvmap_dmabuf_vunmap,
diff --git a/include/linux/nvmap.h b/include/linux/nvmap.h
index 795a7fc..ca907db 100644
@@ -108,6 +108,11 @@ void *nvmap_mmap(struct nvmap_handle_ref *r);
 
 void nvmap_munmap(struct nvmap_handle_ref *r, void *addr);
 
+void *nvmap_kmap(struct nvmap_handle_ref *r, unsigned int pagenum);
+
+void nvmap_kunmap(struct nvmap_handle_ref *r, unsigned int pagenum,
+               void *addr);
+
 struct nvmap_client *nvmap_client_get_file(int fd);
 
 struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
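
With the .kunmap callback wired up, a dma-buf importer can map and unmap
individual pages of an nvmap-exported buffer through the generic interface
instead of being limited to whole-buffer maps. A hedged sketch follows;
example_clear_page is hypothetical, and a real caller would also bracket
the access with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() as the
dma-buf API of this era expects.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical importer: these calls dispatch to nvmap_dmabuf_kmap()
 * and nvmap_dmabuf_kunmap(), i.e. to nvmap_kmap()/nvmap_kunmap(). */
static int example_clear_page(struct dma_buf *buf, unsigned long pgnum)
{
	void *va = dma_buf_kmap(buf, pgnum);

	if (!va)
		return -ENOMEM;
	memset(va, 0, PAGE_SIZE);
	dma_buf_kunmap(buf, pgnum, va);
	return 0;
}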