drivers: video: tegra: post-merge integration
Ken Adams [Wed, 17 Oct 2012 21:01:07 +0000 (17:01 -0400)]
The bulk of this change adapts the gk20a and vic03 code to the
reworked memory manager API in nvhost: mem_op().pin() now returns
a struct sg_table * instead of a physical address (callers read the
DMA address via sg_dma_address()), unpin() takes that table back,
get() takes the owning device, and alloc() takes an explicit heap
mask in place of the nvmap carveout constants. The t124 read3dreg
stub is also removed, along with other minor fixes as necessary.
A sketch of the new pin/unpin pattern follows the file list below.

Change-Id: I3e689610c865ab9f88300a0b2ef17d5ca3ab1459
Signed-off-by: Ken Adams <kadams@nvidia.com>
Reviewed-on: http://git-master/r/145364
Reviewed-by: Jeff Smith <jsmith@nvidia.com>

13 files changed:
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.h
drivers/video/tegra/host/gk20a/pmu_gk20a.c
drivers/video/tegra/host/nvhost_as.c
drivers/video/tegra/host/nvhost_memmgr.c
drivers/video/tegra/host/nvhost_memmgr.h
drivers/video/tegra/host/nvmap.h
drivers/video/tegra/host/t124/t124.c
drivers/video/tegra/host/vic03/vic03.c
drivers/video/tegra/host/vic03/vic03.h
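
The following sketch (not part of the commit) illustrates the API
change the hunks below adapt to. It is distilled from the call sites
in this diff; gk20a_pin_example()/gk20a_unpin_example() are
hypothetical helpers, and struct mem_desc is the descriptor extended
in mm_gk20a.h with an sgt field.

    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /*
     * Old API: pin() returned the physical address directly, and
     * unpin() took only the handle:
     *
     *     desc->cpu_pa = mem_op().pin(memmgr, desc->ref);
     *     mem_op().unpin(memmgr, desc->ref);
     *
     * New API: pin() returns a struct sg_table *; the DMA address is
     * read from its first scatterlist entry, and the same table must
     * be handed back to unpin().
     */
    static int gk20a_pin_example(struct mem_mgr *memmgr,
                                 struct mem_desc *desc, phys_addr_t *pa)
    {
            desc->sgt = mem_op().pin(memmgr, desc->ref);
            if (IS_ERR_OR_NULL(desc->sgt))
                    return -ENOMEM;

            *pa = sg_dma_address(desc->sgt->sgl);
            return 0;
    }

    static void gk20a_unpin_example(struct mem_mgr *memmgr,
                                    struct mem_desc *desc)
    {
            mem_op().unpin(memmgr, desc->ref, desc->sgt);
            mem_op().put(memmgr, desc->ref);
            desc->ref = 0;
            desc->sgt = 0;
    }

The same conversion appears at every pin site below; alloc() calls
additionally gain a trailing heap-mask argument (0 here, replacing
NVMAP_HEAP_CARVEOUT_GENERIC), and mem_op().get() gains the device
that owns the handle.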

diff --git a/drivers/video/tegra/host/gk20a/channel_gk20a.c b/drivers/video/tegra/host/gk20a/channel_gk20a.c
index adcd76b..d337b08 100644
@@ -22,8 +22,8 @@
 #include <linux/delay.h>
 #include <linux/highmem.h> /* need for nvmap.h*/
 #include <trace/events/nvhost.h>
+#include <linux/scatterlist.h>
 
-#include "../../nvmap/nvmap.h"
 
 #include "../dev.h"
 #include "../nvhost_as.h"
@@ -302,17 +302,18 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
 
        ch->inst_block.mem.ref =
                mem_op().alloc(memmgr, ram_in_alloc_size_v(),
-                           DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                           DEFAULT_NVMAP_ALLOC_FLAGS,
-                           NVMAP_HEAP_CARVEOUT_GENERIC);
+                           DEFAULT_ALLOC_ALIGNMENT,
+                           DEFAULT_ALLOC_FLAGS,
+                           0);
 
        if (IS_ERR(ch->inst_block.mem.ref)) {
                ch->inst_block.mem.ref = 0;
                goto clean_up;
        }
 
-       ch->inst_block.cpu_pa =
+       ch->inst_block.mem.sgt = 
                mem_op().pin(memmgr, ch->inst_block.mem.ref);
+       ch->inst_block.cpu_pa = sg_dma_address(ch->inst_block.mem.sgt->sgl);
 
        /* IS_ERR throws a warning here (expecting void *) */
        if (ch->inst_block.cpu_pa == -EINVAL ||
@@ -340,7 +341,7 @@ static void channel_gk20a_free_inst(struct gk20a *g,
 {
        struct mem_mgr *memmgr = mem_mgr_from_g(g);
 
-       mem_op().unpin(memmgr, ch->inst_block.mem.ref);
+       mem_op().unpin(memmgr, ch->inst_block.mem.ref, ch->inst_block.mem.sgt);
        mem_op().put(memmgr, ch->inst_block.mem.ref);
        memset(&ch->inst_block, 0, sizeof(struct inst_desc));
 }
@@ -386,8 +387,10 @@ static int channel_gk20a_update_runlist(struct channel_gk20a *c,
        nvhost_dbg_info("runlist_id : %d, switch to new buffer %p",
                runlist_id, runlist->mem[new_buf].ref);
 
-       runlist_pa = mem_op().pin(memmgr,
-                              runlist->mem[new_buf].ref);
+       runlist->mem[new_buf].sgt = mem_op().pin(memmgr,
+                                                runlist->mem[new_buf].ref);
+       runlist_pa = sg_dma_address(runlist->mem[new_buf].sgt->sgl);
+
        if (!runlist_pa) {
                ret = -ENOMEM;
                goto clean_up;
@@ -439,9 +442,9 @@ static int channel_gk20a_update_runlist(struct channel_gk20a *c,
 
 clean_up:
        if (ret != 0)
-               mem_op().unpin(memmgr, runlist->mem[new_buf].ref);
+               mem_op().unpin(memmgr, runlist->mem[new_buf].ref, runlist->mem[new_buf].sgt);
        else
-               mem_op().unpin(memmgr, runlist->mem[old_buf].ref);
+               mem_op().unpin(memmgr, runlist->mem[old_buf].ref, runlist->mem[old_buf].sgt);
 
        mem_op().munmap(runlist->mem[new_buf].ref,
                     runlist_entry_base);
@@ -631,9 +634,9 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        size = GK20A_PRIV_CMDBUF_ENTRY_NUM * sizeof(u32);
        q->mem.ref = mem_op().alloc(memmgr,
                        size,
-                       DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                       DEFAULT_NVMAP_ALLOC_FLAGS,
-                       NVMAP_HEAP_CARVEOUT_GENERIC);
+                       DEFAULT_ALLOC_ALIGNMENT,
+                       DEFAULT_ALLOC_FLAGS,
+                       0);
        if (IS_ERR_OR_NULL(q->mem.ref)) {
                nvhost_err(d, "ch %d : failed to allocate"
                           " priv cmd buffer(size: %d bytes)",
@@ -918,9 +921,9 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 
        c->gpfifo.mem.ref = mem_op().alloc(memmgr,
                        gpfifo_size * sizeof(struct gpfifo),
-                       DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                       DEFAULT_NVMAP_ALLOC_FLAGS,
-                       NVMAP_HEAP_CARVEOUT_GENERIC);
+                       DEFAULT_ALLOC_ALIGNMENT,
+                       DEFAULT_ALLOC_FLAGS,
+                       0);
        if (IS_ERR_OR_NULL(c->gpfifo.mem.ref)) {
                nvhost_err(d, "channel %d :"
                           " failed to allocate gpfifo (size: %d bytes)",
@@ -1192,10 +1195,11 @@ int gk20a_channel_map_buffer(struct channel_gk20a *ch,
                             struct nvhost_map_buffer_args *a)
 {
        struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
+       struct nvhost_device *dev = ch->ch->dev;
        u64 ret_va;
        struct mem_handle *r;
 
-       r = mem_op().get(memmgr, a->nvmap_handle); /*id, really*/
+       r = mem_op().get(memmgr, a->nvmap_handle, dev); /*id, really*/
 
        nvhost_dbg_info("id=0x%x r=%p", a->nvmap_handle, r);
 
@@ -1225,6 +1229,7 @@ int gk20a_channel_wait(struct channel_gk20a *ch,
                       struct nvhost_wait_args *args)
 {
        struct device *d = dev_from_gk20a(ch->g);
+       struct nvhost_device *dev = ch->ch->dev;
        struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
        struct mem_handle *handle_ref;
        struct notification *notif;
@@ -1245,7 +1250,7 @@ int gk20a_channel_wait(struct channel_gk20a *ch,
                id = args->condition.notifier.nvmap_handle;
                offset = args->condition.notifier.offset;
 
-               handle_ref = mem_op().get(memmgr, id);
+               handle_ref = mem_op().get(memmgr, id, dev);
                if (!handle_ref) {
                        nvhost_err(d, "invalid notifier nvmap handle 0x%08x",
                                   id);
diff --git a/drivers/video/tegra/host/gk20a/fifo_gk20a.c b/drivers/video/tegra/host/gk20a/fifo_gk20a.c
index 7cc2914..7ce3cc6 100644
@@ -20,7 +20,7 @@
  */
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/nvmap.h>
+#include <linux/scatterlist.h>
 
 #include "../dev.h"
 #include "../nvhost_as.h"
@@ -132,7 +133,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
        }
 
        mem_op().munmap(f->userd.mem.ref, f->userd.cpu_va);
-       mem_op().unpin(memmgr, f->userd.mem.ref);
+       mem_op().unpin(memmgr, f->userd.mem.ref, f->userd.mem.sgt);
        mem_op().put(memmgr, f->userd.mem.ref);
        memset(&f->userd, 0, sizeof(struct userd_desc));
 
@@ -177,9 +178,9 @@ static int fifo_gk20a_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
                        runlist->mem[i].ref =
                                mem_op().alloc(memmgr, runlist_size,
-                                           DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                                           DEFAULT_NVMAP_ALLOC_FLAGS,
-                                           NVMAP_HEAP_CARVEOUT_GENERIC);
+                                           DEFAULT_ALLOC_ALIGNMENT,
+                                           DEFAULT_ALLOC_FLAGS,
+                                           0);
                        if (!runlist->mem[i].ref)
                                goto clean_up;
                        runlist->mem[i].size = runlist_size;
@@ -305,9 +306,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g, bool reinit)
        f->userd_total_size = f->userd_entry_size * f->num_channels;
 
        f->userd.mem.ref = mem_op().alloc(memmgr, f->userd_total_size,
-                                      DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                                      DEFAULT_NVMAP_ALLOC_FLAGS,
-                                      NVMAP_HEAP_CARVEOUT_GENERIC);
+                                      DEFAULT_ALLOC_ALIGNMENT,
+                                      DEFAULT_ALLOC_FLAGS,
+                                      0);
        if (IS_ERR_OR_NULL(f->userd.mem.ref)) {
                err = -ENOMEM;
                goto clean_up;
@@ -321,7 +322,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g, bool reinit)
                goto clean_up;
        }
 
-       f->userd.cpu_pa = mem_op().pin(memmgr, f->userd.mem.ref);
+       f->userd.mem.sgt = mem_op().pin(memmgr, f->userd.mem.ref);
+       f->userd.cpu_pa = sg_dma_address(f->userd.mem.sgt->sgl);
        nvhost_dbg_info("userd physical address : 0x%08x",
                   (u32)f->userd.cpu_pa);
 
@@ -382,7 +384,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g, bool reinit)
 clean_up:
        nvhost_dbg_fn("fail");
        mem_op().munmap(f->userd.mem.ref, f->userd.cpu_va);
-       mem_op().unpin(memmgr, f->userd.mem.ref);
+       mem_op().unpin(memmgr, f->userd.mem.ref, f->userd.mem.sgt);
        mem_op().put(memmgr, f->userd.mem.ref);
        memset(&f->userd, 0, sizeof(struct userd_desc));
 
diff --git a/drivers/video/tegra/host/gk20a/gr_gk20a.c b/drivers/video/tegra/host/gk20a/gr_gk20a.c
index 7371c0c..ed0b1aa 100644
@@ -21,7 +21,7 @@
 
 #include <linux/delay.h>       /* for udelay */
 #include <linux/mm.h>          /* for totalram_pages */
-#include <linux/nvmap.h>
+#include <linux/scatterlist.h>
 
 #include "../dev.h"
 
@@ -1704,9 +1704,9 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        nvhost_dbg_info("cb_buffer_size : %d", cb_buffer_size);
 
        mem = mem_op().alloc(memmgr, cb_buffer_size,
-                         DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                         DEFAULT_NVMAP_ALLOC_FLAGS,
-                         NVMAP_HEAP_CARVEOUT_GENERIC);
+                         DEFAULT_ALLOC_ALIGNMENT,
+                         DEFAULT_ALLOC_FLAGS,
+                         0);
        if (IS_ERR_OR_NULL(mem))
                goto clean_up;
 
@@ -1716,9 +1716,9 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        nvhost_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
 
        mem = mem_op().alloc(memmgr, pagepool_buffer_size,
-                         DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                         DEFAULT_NVMAP_ALLOC_FLAGS,
-                         NVMAP_HEAP_CARVEOUT_GENERIC);
+                         DEFAULT_ALLOC_ALIGNMENT,
+                         DEFAULT_ALLOC_FLAGS,
+                         0);
        if (IS_ERR_OR_NULL(mem))
                goto clean_up;
 
@@ -1728,9 +1728,9 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        nvhost_dbg_info("attr_buffer_size : %d", attr_buffer_size);
 
        mem = mem_op().alloc(memmgr, attr_buffer_size,
-                         DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                         DEFAULT_NVMAP_ALLOC_FLAGS,
-                         NVMAP_HEAP_CARVEOUT_GENERIC);
+                         DEFAULT_ALLOC_ALIGNMENT,
+                         DEFAULT_ALLOC_FLAGS,
+                         0);
        if (IS_ERR_OR_NULL(mem))
                goto clean_up;
 
@@ -1738,9 +1738,9 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
 
        mem = mem_op().alloc(memmgr, attr_buffer_size,
-                         DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                         DEFAULT_NVMAP_ALLOC_FLAGS,
-                         NVMAP_HEAP_CARVEOUT_GENERIC); /* TBD: use NVMAP_HEAP_CARVEOUT_VPR */
+                         DEFAULT_ALLOC_ALIGNMENT,
+                         DEFAULT_ALLOC_FLAGS,
+                         0); /* TBD: use NVMAP_HEAP_CARVEOUT_VPR */
        if (IS_ERR_OR_NULL(mem))
                goto clean_up;
 
@@ -1751,9 +1751,9 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                   gr->ctx_vars.golden_image_size);
 
        mem = mem_op().alloc(memmgr, gr->ctx_vars.golden_image_size,
-                         DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                         DEFAULT_NVMAP_ALLOC_FLAGS,
-                         NVMAP_HEAP_CARVEOUT_GENERIC);
+                         DEFAULT_ALLOC_ALIGNMENT,
+                         DEFAULT_ALLOC_FLAGS,
+                         0);
        if (IS_ERR_OR_NULL(mem))
                goto clean_up;
 
@@ -1886,9 +1886,9 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
 
        gr_ctx->mem.ref = mem_op().alloc(memmgr,
                                gr->ctx_vars.buffer_total_size,
-                               DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                               DEFAULT_NVMAP_ALLOC_FLAGS,
-                               NVMAP_HEAP_CARVEOUT_GENERIC);
+                               DEFAULT_ALLOC_ALIGNMENT,
+                               DEFAULT_ALLOC_FLAGS,
+                               0);
 
        if (IS_ERR(gr_ctx->mem.ref))
                return -ENOMEM;
@@ -1927,9 +1927,9 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
        nvhost_dbg_fn("");
 
        patch_ctx->mem.ref = mem_op().alloc(memmgr, 128 * sizeof(u32),
-                               DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                               DEFAULT_NVMAP_ALLOC_FLAGS,
-                               NVMAP_HEAP_CARVEOUT_GENERIC);
+                               DEFAULT_ALLOC_ALIGNMENT,
+                               DEFAULT_ALLOC_FLAGS,
+                               0);
        if (IS_ERR(patch_ctx->mem.ref))
                return -ENOMEM;
 
@@ -2143,9 +2143,10 @@ static void gk20a_remove_gr_support(struct gk20a *g, struct gr_gk20a *gr)
 
        gr_gk20a_free_global_ctx_buffers(g);
 
-       mem_op().unpin(memmgr, gr->mmu_wr_mem.mem.ref);
-       mem_op().unpin(memmgr, gr->mmu_rd_mem.mem.ref);
-       mem_op().unpin(memmgr, gr->compbit_store.mem.ref);
+       mem_op().unpin(memmgr, gr->mmu_wr_mem.mem.ref, gr->mmu_wr_mem.mem.sgt);
+       mem_op().unpin(memmgr, gr->mmu_rd_mem.mem.ref, gr->mmu_rd_mem.mem.sgt);
+       mem_op().unpin(memmgr, gr->compbit_store.mem.ref,
+                      gr->compbit_store.mem.sgt);
        mem_op().put(memmgr, gr->mmu_wr_mem.mem.ref);
        mem_op().put(memmgr, gr->mmu_rd_mem.mem.ref);
        mem_op().put(memmgr, gr->compbit_store.mem.ref);
@@ -2374,17 +2375,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
        gr->mmu_wr_mem_size = gr->mmu_rd_mem_size = 0x1000;
 
        gr->mmu_wr_mem.mem.ref = mem_op().alloc(memmgr, gr->mmu_wr_mem_size,
-                                            DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                                            DEFAULT_NVMAP_ALLOC_FLAGS,
-                                            NVMAP_HEAP_CARVEOUT_GENERIC);
+                                            DEFAULT_ALLOC_ALIGNMENT,
+                                            DEFAULT_ALLOC_FLAGS,
+                                            0);
        if (!gr->mmu_wr_mem.mem.ref)
                goto clean_up;
        gr->mmu_wr_mem.mem.size = gr->mmu_wr_mem_size;
 
        gr->mmu_rd_mem.mem.ref = mem_op().alloc(memmgr, gr->mmu_rd_mem_size,
-                                            DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                                            DEFAULT_NVMAP_ALLOC_FLAGS,
-                                            NVMAP_HEAP_CARVEOUT_GENERIC);
+                                            DEFAULT_ALLOC_ALIGNMENT,
+                                            DEFAULT_ALLOC_FLAGS,
+                                            0);
        if (!gr->mmu_rd_mem.mem.ref)
                goto clean_up;
        gr->mmu_rd_mem.mem.size = gr->mmu_rd_mem_size;
@@ -2401,14 +2402,15 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
        memset(mmu_ptr, 0, gr->mmu_rd_mem.mem.size);
        mem_op().munmap(gr->mmu_rd_mem.mem.ref, mmu_ptr);
 
-       gr->mmu_wr_mem.cpu_pa = mem_op().pin(memmgr, gr->mmu_wr_mem.mem.ref);
-       if (gr->mmu_wr_mem.cpu_pa == -EINVAL || gr->mmu_wr_mem.cpu_pa == -EINTR)
+       gr->mmu_wr_mem.mem.sgt = mem_op().pin(memmgr, gr->mmu_wr_mem.mem.ref); 
+       if (IS_ERR_OR_NULL(gr->mmu_wr_mem.mem.sgt))
                goto clean_up;
+       gr->mmu_wr_mem.cpu_pa = sg_dma_address(gr->mmu_wr_mem.mem.sgt->sgl);
 
-       gr->mmu_rd_mem.cpu_pa = mem_op().pin(memmgr, gr->mmu_rd_mem.mem.ref);
-       if (gr->mmu_rd_mem.cpu_pa == -EINVAL || gr->mmu_rd_mem.cpu_pa == -EINTR)
+       gr->mmu_rd_mem.mem.sgt = mem_op().pin(memmgr, gr->mmu_rd_mem.mem.ref); 
+       if (IS_ERR_OR_NULL(gr->mmu_rd_mem.mem.sgt))
                goto clean_up;
-
+       gr->mmu_rd_mem.cpu_pa = sg_dma_address(gr->mmu_rd_mem.mem.sgt->sgl);
        return 0;
 
 clean_up:
@@ -2654,9 +2656,9 @@ static int gr_gk20a_init_comptag(struct gk20a *g, struct gr_gk20a *gr)
 
        gr->compbit_store.mem.ref =
                mem_op().alloc(memmgr, compbit_backing_size,
-                           DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                           DEFAULT_NVMAP_ALLOC_FLAGS,
-                           NVMAP_HEAP_CARVEOUT_GENERIC);
+                           DEFAULT_ALLOC_ALIGNMENT,
+                           DEFAULT_ALLOC_FLAGS,
+                           0);
        if (IS_ERR_OR_NULL(gr->compbit_store.mem.ref)) {
                nvhost_err(dev_from_gk20a(g), "failed to allocate"
                           "backing store for compbit : size %d",
@@ -2665,13 +2667,14 @@ static int gr_gk20a_init_comptag(struct gk20a *g, struct gr_gk20a *gr)
        }
        gr->compbit_store.mem.size = compbit_backing_size;
 
-       gr->compbit_store.base_pa =
+       gr->compbit_store.mem.sgt =
                mem_op().pin(memmgr, gr->compbit_store.mem.ref);
-       if (gr->compbit_store.base_pa == -EINVAL ||
-           gr->compbit_store.base_pa == -EINTR) {
+       if (IS_ERR_OR_NULL(gr->compbit_store.mem.sgt)) {
                ret = -ENOMEM;
                goto clean_up;
        }
+       gr->compbit_store.base_pa =
+               sg_dma_address(gr->compbit_store.mem.sgt->sgl);
 
        nvhost_allocator_init(&gr->comp_tags, "comptag",
                        1, max_comptag_lines, 1);
diff --git a/drivers/video/tegra/host/gk20a/mm_gk20a.c b/drivers/video/tegra/host/gk20a/mm_gk20a.c
index 1774cd3..9182ec2 100644
@@ -23,6 +23,7 @@
 #include <linux/highmem.h>
 #include <linux/log2.h>
 #include <linux/nvhost.h>
+#include <linux/scatterlist.h>
 #include <linux/nvmap.h>
 
 #include "../../nvmap/nvmap.h"
@@ -180,7 +181,8 @@ int gk20a_init_mm_support(struct gk20a *g, bool reinit)
 }
 
 #ifdef CONFIG_TEGRA_SIMULATION_SPLIT_MEM
-static int alloc_gmmu_nvmap_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *pa, void **handle)
+static int alloc_gmmu_nvmap_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *pa, void **handle,
+                                 struct sg_table **sgt)
 {
        struct mem_mgr *client = mem_mgr_from_vm(vm);
        struct mem_handle *r;
@@ -192,9 +194,9 @@ static int alloc_gmmu_nvmap_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *p
        nvhost_dbg_fn("");
 
        r = mem_op().alloc(client, len,
-                       DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                       DEFAULT_NVMAP_ALLOC_FLAGS,
-                       NVMAP_HEAP_CARVEOUT_GENERIC);
+                       DEFAULT_ALLOC_ALIGNMENT,
+                       DEFAULT_ALLOC_FLAGS,
+                       0);
        if (IS_ERR_OR_NULL(r)) {
                nvhost_dbg(dbg_pte, "nvmap_alloc failed\n");
                goto err_out;
@@ -204,12 +206,12 @@ static int alloc_gmmu_nvmap_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *p
                nvhost_dbg(dbg_pte, "nvmap_mmap failed\n");
                goto err_alloced;
        }
-       phys = mem_op().pin(client, r);
-       if (IS_ERR_OR_NULL((void *)pa)) {
+       *sgt = mem_op().pin(client, r);
+       if (IS_ERR_OR_NULL(*sgt)) {
                nvhost_dbg(dbg_pte, "nvmap_pin failed\n");
                goto err_alloced;
        }
-
+       phys = sg_dma_address((*sgt)->sgl);
        memset(va, 0, len);
        mem_op().munmap(r, va);
        *pa = phys;
@@ -224,7 +226,8 @@ err_out:
 }
 #endif
 
-static int alloc_gmmu_sysmem_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *pa, void **handle)
+static int alloc_gmmu_sysmem_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *pa,
+                                  void **handle, struct sg_table **sgt)
 {
        struct page *pte_page;
 
@@ -235,21 +238,23 @@ static int alloc_gmmu_sysmem_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *
                return -ENOMEM;
 
        *pa = page_to_phys(pte_page);
+       *sgt = 0;
        *handle = pte_page;
        return 0;
 }
 
-static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *pa, void **handle)
+static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, phys_addr_t *pa,
+                           void **handle, struct sg_table **sgt)
 {
 
        nvhost_dbg_fn("");
 
 #ifdef CONFIG_TEGRA_SIMULATION_SPLIT_MEM
        if (tegra_split_mem_active())
-               return alloc_gmmu_nvmap_pages(vm, order, pa, handle);
+               return alloc_gmmu_nvmap_pages(vm, order, pa, handle, sgt);
        else
 #endif
-               return alloc_gmmu_sysmem_pages(vm, order, pa, handle);
+               return alloc_gmmu_sysmem_pages(vm, order, pa, handle, sgt);
 }
 
 static void free_gmmu_pages(struct vm_gk20a *vm, void *handle, u32 order)
@@ -325,6 +330,7 @@ static int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
        u32 pte_order;
        phys_addr_t phys;
        void *handle;
+       struct sg_table *sgt;
 
        nvhost_dbg_fn("");
 
@@ -332,7 +338,7 @@ static int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
        page_size_idx = gmmu_page_size_idx(gmmu_page_size);
        pte_order = vm->mm->page_table_sizing[page_size_idx].order;
 
-       err = alloc_gmmu_pages(vm, pte_order, &phys, &handle);
+       err = alloc_gmmu_pages(vm, pte_order, &phys, &handle, &sgt);
        if (err)
                return err;
 
@@ -340,6 +346,7 @@ static int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
 
        *pa = phys;
        pte->ref = handle;
+       pte->sgt = sgt;
        pte->page_size_idx = page_size_idx;
 
        return 0;
@@ -724,6 +731,7 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
        struct nvhost_allocator *ctag_allocator = &g->gr.comp_tags;
        struct device *d = &g->dev->dev;
        struct mapped_buffer_node *mapped_buffer = 0;
+       struct sg_table *sgt;
        bool inserted = false, va_allocated = false;
        u32 gmmu_page_size = 0;
        u64 map_offset = 0;
@@ -817,7 +825,12 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
        }
 
        /* pin buffer to get phys/iovmm addr */
-       bfr.addr = mem_op().pin(memmgr, r);
+       sgt = mem_op().pin(memmgr, r);
+       if (IS_ERR_OR_NULL(sgt)) {
+               nvhost_warn(d, "failed to pin buffer");
+               goto clean_up;
+       }
+       bfr.addr = sg_dma_address(sgt->sgl);
 
        nvhost_dbg_info("nvmap pinned buffer @ 0x%x", bfr.addr);
        nvhost_dbg_fn("r=%p, map_offset=0x%llx, contig=%d "
@@ -837,6 +850,7 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
        }
        mapped_buffer->memmgr     = memmgr;
        mapped_buffer->handle_ref = r;
+       mapped_buffer->sgt        = sgt;
        mapped_buffer->addr       = map_offset;
        mapped_buffer->size       = bfr.size;
        mapped_buffer->page_size  = gmmu_page_size;
@@ -1091,7 +1105,8 @@ static void gk20a_channel_vm_unmap(struct vm_gk20a *vm,
        }
 
        mem_op().unpin(mapped_buffer->memmgr,
-                   mapped_buffer->handle_ref);
+                      mapped_buffer->handle_ref,
+                      mapped_buffer->sgt);
 
        /* remove from mapped buffer tree, free */
        rb_erase(&mapped_buffer->node, &vm->mapped_buffers);
@@ -1152,7 +1167,7 @@ static int gk20a_as_alloc_share(struct nvhost_as_share *as_share)
                   vm->va_limit, vm->pdes.num_pdes);
 
        /* allocate the page table directory */
-       err = alloc_gmmu_pages(vm, 0, &vm->pdes.phys, &vm->pdes.ref);
+       err = alloc_gmmu_pages(vm, 0, &vm->pdes.phys, &vm->pdes.ref, &vm->pdes.sgt);
        if (err) {
                return -ENOMEM;
        }
@@ -1408,7 +1423,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 
 
        /* allocate the page table directory */
-       err = alloc_gmmu_pages(vm, 0, &vm->pdes.phys, &vm->pdes.ref);
+       err = alloc_gmmu_pages(vm, 0, &vm->pdes.phys, &vm->pdes.ref, &vm->pdes.sgt);
        if (err)
                goto clean_up;
 
@@ -1431,9 +1446,9 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        inst_block->mem.size = ram_in_alloc_size_v();
        inst_block->mem.ref =
                mem_op().alloc(nvmap, inst_block->mem.size,
-                           DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                           DEFAULT_NVMAP_ALLOC_FLAGS,
-                           NVMAP_HEAP_CARVEOUT_GENERIC);
+                           DEFAULT_ALLOC_ALIGNMENT,
+                           DEFAULT_ALLOC_FLAGS,
+                           0);
 
        if (IS_ERR(inst_block->mem.ref)) {
                inst_block->mem.ref = 0;
@@ -1441,15 +1456,15 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
                goto clean_up;
        }
 
-       inst_block->cpu_pa = inst_pa =
-               mem_op().pin(nvmap, inst_block->mem.ref);
-
+       inst_block->mem.sgt = mem_op().pin(nvmap, inst_block->mem.ref);
        /* IS_ERR throws a warning here (expecting void *) */
-       if (inst_pa == -EINVAL || inst_pa == -EINTR) {
+       if (IS_ERR_OR_NULL(inst_block->mem.sgt)) {
                inst_pa = 0;
-               err = (int)inst_pa;
+               err = -ENOMEM;
                goto clean_up;
        }
+       inst_block->cpu_pa = inst_pa = sg_dma_address(inst_block->mem.sgt->sgl);
+
        inst_ptr = mem_op().mmap(inst_block->mem.ref);
        if (IS_ERR(inst_ptr)) {
                return -ENOMEM;
@@ -1535,7 +1550,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
                   vm->va_limit, vm->pdes.num_pdes);
 
        /* allocate the page table directory */
-       err = alloc_gmmu_pages(vm, 0, &vm->pdes.phys, &vm->pdes.ref);
+       err = alloc_gmmu_pages(vm, 0, &vm->pdes.phys, &vm->pdes.ref, &vm->pdes.sgt);
        if (err)
                goto clean_up;
 
@@ -1558,9 +1573,9 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        inst_block->mem.size = GK20A_PMU_INST_SIZE;
        inst_block->mem.ref =
                mem_op().alloc(nvmap, inst_block->mem.size,
-                           DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                           DEFAULT_NVMAP_ALLOC_FLAGS,
-                           NVMAP_HEAP_CARVEOUT_GENERIC);
+                           DEFAULT_ALLOC_ALIGNMENT,
+                           DEFAULT_ALLOC_FLAGS,
+                           0);
 
        if (IS_ERR(inst_block->mem.ref)) {
                inst_block->mem.ref = 0;
@@ -1568,15 +1583,16 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
                goto clean_up;
        }
 
-       inst_block->cpu_pa = inst_pa =
-               mem_op().pin(nvmap, inst_block->mem.ref);
-
+       inst_block->mem.sgt = mem_op().pin(nvmap, inst_block->mem.ref);
        /* IS_ERR throws a warning here (expecting void *) */
-       if (inst_pa == -EINVAL || inst_pa == -EINTR) {
+       if (IS_ERR_OR_NULL(inst_block->mem.sgt)) {
                inst_pa = 0;
-               err = (int)inst_pa;
+               err = -ENOMEM;
                goto clean_up;
        }
+       inst_block->cpu_pa = inst_pa =
+               sg_dma_address(inst_block->mem.sgt->sgl);
+
        nvhost_dbg_info("pmu inst block physical addr: 0x%08x",
                   inst_pa);
 
@@ -1801,7 +1817,8 @@ void gk20a_mm_dump_vm(struct vm_gk20a *vm,
 #ifdef CONFIG_TEGRA_SIMULATION_SPLIT_MEM
                if (tegra_split_mem_active()) {
                        err = map_gmmu_pages(pte_s->ref, &pte);
-                       pte_addr = mem_op().pin(client, pte_s->ref);
+                       pte_s->sgt = mem_op().pin(client, pte_s->ref);
+                       pte_addr = sg_dma_address(pte_s->sgt->sgl);
                } else
 #endif
                {
@@ -1860,7 +1877,7 @@ void gk20a_mm_dump_vm(struct vm_gk20a *vm,
 #ifdef CONFIG_TEGRA_SIMULATION_SPLIT_MEM
                if (tegra_split_mem_active()) {
                        unmap_gmmu_pages(pte_s->ref, pte);
-                       mem_op().unpin(client, pte_s->ref);
+                       mem_op().unpin(client, pte_s->ref, pte_s->sgt);
                } else
 #endif
                        unmap_gmmu_pages(
diff --git a/drivers/video/tegra/host/gk20a/mm_gk20a.h b/drivers/video/tegra/host/gk20a/mm_gk20a.h
index ed59aef..fdf53dd 100644
@@ -25,6 +25,7 @@
 
 struct mem_desc {
        struct mem_handle *ref;
+       struct sg_table *sgt;
        u32 size;
 };
 
@@ -106,6 +107,7 @@ struct page_table_gk20a {
        void *ref;
        /* track mapping cnt on this page table */
        u32 ref_cnt;
+       struct sg_table *sgt;
        /* 4k or 128k */
        u32 page_size_idx;
 };
@@ -118,7 +120,7 @@ struct page_directory_gk20a {
        /* Either a *page or a *mem_handle */
        void *ref;
        bool dirty;
-
+       struct sg_table *sgt;
        struct page_table_gk20a *ptes;
 };
 
@@ -128,6 +130,7 @@ struct mapped_buffer_node {
        u64 size;
        struct mem_mgr *memmgr;
        struct mem_handle *handle_ref;
+       struct sg_table *sgt;
        u32 page_size;
        u32 ctag_offset;
        u32 ctag_lines;
@@ -213,8 +216,8 @@ int gk20a_mm_init(struct mm_gk20a *mm);
 #define mem_mgr_from_vm(vm) (gk20a_from_vm(vm)->host->memmgr)
 #define dev_from_vm(vm) dev_from_gk20a(vm->mm->g)
 
-#define DEFAULT_NVMAP_ALLOC_FLAGS (NVMAP_HANDLE_UNCACHEABLE)
-#define DEFAULT_NVMAP_ALLOC_ALIGNMENT (4*1024)
+#define DEFAULT_ALLOC_FLAGS (mem_mgr_flag_uncacheable)
+#define DEFAULT_ALLOC_ALIGNMENT (4*1024)
 
 static inline int bar1_aperture_size_mb_gk20a(void)
 {
diff --git a/drivers/video/tegra/host/gk20a/pmu_gk20a.c b/drivers/video/tegra/host/gk20a/pmu_gk20a.c
index fa3a289..4af7e8e 100644
@@ -25,6 +25,7 @@
 
 #include "../dev.h"
 #include "../bus_client.h"
+#include "nvhost_memmgr.h"
 
 #include "gk20a.h"
 #include "hw_mc_gk20a.h"
@@ -984,9 +985,9 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g, bool reinit)
 
        pmu->ucode.mem.ref = mem_op().alloc(memmgr,
                        GK20A_PMU_UCODE_SIZE_MAX,
-                       DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                       DEFAULT_NVMAP_ALLOC_FLAGS,
-                       NVMAP_HEAP_CARVEOUT_GENERIC);
+                       DEFAULT_ALLOC_ALIGNMENT,
+                       DEFAULT_ALLOC_FLAGS,
+                       0);
        if (IS_ERR_OR_NULL(pmu->ucode.mem.ref)) {
                err = -ENOMEM;
                goto clean_up;
@@ -1011,9 +1012,9 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g, bool reinit)
        }
 
        pmu->pg_buf.mem.ref = mem_op().alloc(memmgr, size,
-                               DEFAULT_NVMAP_ALLOC_ALIGNMENT, /* TBD: 256 bytes alignment is sufficient */
-                               DEFAULT_NVMAP_ALLOC_FLAGS,
-                               NVMAP_HEAP_CARVEOUT_GENERIC);
+                               DEFAULT_ALLOC_ALIGNMENT, /* TBD: 256 bytes alignment is sufficient */
+                               DEFAULT_ALLOC_FLAGS,
+                               0);
        if (IS_ERR_OR_NULL(pmu->pg_buf.mem.ref)) {
                nvhost_err(dev_from_gk20a(g),
                        "fail to allocate fecs pg buffer");
@@ -1032,9 +1033,9 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g, bool reinit)
        }
 
        pmu->seq_buf.mem.ref = mem_op().alloc(memmgr, 4096,
-                               DEFAULT_NVMAP_ALLOC_ALIGNMENT,
-                               DEFAULT_NVMAP_ALLOC_FLAGS,
-                               NVMAP_HEAP_CARVEOUT_GENERIC);
+                               DEFAULT_ALLOC_ALIGNMENT,
+                               DEFAULT_ALLOC_FLAGS,
+                               0);
        if (IS_ERR_OR_NULL(pmu->seq_buf.mem.ref)) {
                nvhost_err(dev_from_gk20a(g),
                        "fail to allocate zbc buffer");
diff --git a/drivers/video/tegra/host/nvhost_as.c b/drivers/video/tegra/host/nvhost_as.c
index 1f5824c..c8af653 100644
@@ -385,7 +385,7 @@ int nvhost_as_ioctl_map_buffer(struct nvhost_as_share *as_share,
                return err;
        }
 
-       r = mem_op().get(memmgr, args->nvmap_handle);
+       r = mem_op().get(memmgr, args->nvmap_handle, /*XXX:get device*/0);
        if (!r) {
                err = -EINVAL;
                goto finish;
diff --git a/drivers/video/tegra/host/nvhost_memmgr.c b/drivers/video/tegra/host/nvhost_memmgr.c
index d3fc164..617a56f 100644
@@ -87,11 +87,11 @@ struct mem_mgr *nvhost_memmgr_get_mgr_file(int fd)
 }
 
 struct mem_handle *nvhost_memmgr_alloc(struct mem_mgr *mgr,
-               size_t size, size_t align, int flags)
+       size_t size, size_t align, int flags, unsigned int heap_mask)
 {
        struct mem_handle *h = NULL;
 #ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
-       h = nvhost_nvmap_alloc(mgr, size, align, flags);
+       h = nvhost_nvmap_alloc(mgr, size, align, flags, heap_mask);
 #else
 #ifdef CONFIG_TEGRA_GRHOST_USE_DMABUF
        h = nvhost_dmabuf_alloc(mgr, size, align, flags);
diff --git a/drivers/video/tegra/host/nvhost_memmgr.h b/drivers/video/tegra/host/nvhost_memmgr.h
index e717b5a..27e996b 100644
@@ -51,7 +51,7 @@ struct mem_mgr *nvhost_memmgr_get_mgr(struct mem_mgr *);
 struct mem_mgr *nvhost_memmgr_get_mgr_file(int fd);
 struct mem_handle *nvhost_memmgr_alloc(struct mem_mgr *,
                size_t size, size_t align,
-               int flags);
+               int flags, unsigned int heap_mask);
 struct mem_handle *nvhost_memmgr_get(struct mem_mgr *,
                u32 id, struct platform_device *dev);
 void nvhost_memmgr_put(struct mem_mgr *mgr, struct mem_handle *handle);
diff --git a/drivers/video/tegra/host/nvmap.h b/drivers/video/tegra/host/nvmap.h
index bad6aaa..1da6926 100644
@@ -32,7 +32,7 @@ void nvhost_nvmap_put_mgr(struct mem_mgr *mgr);
 struct mem_mgr *nvhost_nvmap_get_mgr(struct mem_mgr *mgr);
 struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd);
 struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
-               size_t size, size_t align, int flags);
+               size_t size, size_t align, int flags, unsigned int heap_flags);
 void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle);
 struct sg_table *nvhost_nvmap_pin(struct mem_mgr *mgr,
                struct mem_handle *handle);
diff --git a/drivers/video/tegra/host/t124/t124.c b/drivers/video/tegra/host/t124/t124.c
index fc4a3ec..2e75c98 100644
@@ -265,14 +265,6 @@ static int t124_channel_submit(struct nvhost_job *job)
                return host1x_channel_submit(job);
 }
 
-static int t124_channel_read_3d_reg(struct nvhost_channel *channel,
-                       struct nvhost_hwctx *hwctx,
-                       u32 offset,
-                       u32 *value)
-{
-       return -EPERM;
-}
-
 #if defined(CONFIG_TEGRA_GK20A)
 static int t124_channel_alloc_obj(struct nvhost_hwctx *hwctx,
                                 struct nvhost_alloc_obj_ctx_args *args)
@@ -411,7 +403,6 @@ int nvhost_init_t124_channel_support(struct nvhost_master *host,
 
        op->channel.init          = t124_channel_init;
        op->channel.submit        = t124_channel_submit;
-       op->channel.read3dreg     = t124_channel_read_3d_reg;
 
 #if defined(CONFIG_TEGRA_GK20A)
        op->channel.alloc_obj     = t124_channel_alloc_obj;
diff --git a/drivers/video/tegra/host/vic03/vic03.c b/drivers/video/tegra/host/vic03/vic03.c
index fca6ae0..1186a30 100644
@@ -22,6 +22,7 @@
 #include <asm/byteorder.h>      /* for parsing ucode image wrt endianness */
 #include <linux/delay.h>       /* for udelay */
 #include <linux/export.h>
+#include <linux/scatterlist.h>
 #include <linux/nvmap.h>
 
 #include "dev.h"
@@ -241,8 +242,8 @@ static int vic03_read_ucode(struct nvhost_device *dev)
        /* allocate pages for ucode */
        v->ucode.mem_r = mem_op().alloc(nvmap_c,
                                     roundup(ucode_fw->size, PAGE_SIZE),
-                                    PAGE_SIZE, NVMAP_HANDLE_UNCACHEABLE,
-                                    NVMAP_HEAP_CARVEOUT_GENERIC);
+                                    PAGE_SIZE, mem_mgr_flag_uncacheable,
+                                    0);
        if (IS_ERR_OR_NULL(v->ucode.mem_r)) {
                nvhost_dbg_fn("nvmap alloc failed");
                err = -ENOMEM;
@@ -357,11 +358,12 @@ void nvhost_vic03_init(struct nvhost_device *dev)
                return;
        }
 
-       v->ucode.pa = mem_op().pin(v->host->memmgr, v->ucode.mem_r);
-       if (v->ucode.pa == -EINVAL || v->ucode.pa == -EINTR) {
+       v->ucode.sgt = mem_op().pin(v->host->memmgr, v->ucode.mem_r);
+       if (IS_ERR_OR_NULL(v->ucode.sgt)) {
                nvhost_err(&dev->dev, "nvmap pin failed for ucode");
                goto clean_up;
        }
+       v->ucode.pa = sg_dma_address(v->ucode.sgt->sgl);
 
        err = vic03_boot(dev);
 
@@ -372,7 +374,8 @@ void nvhost_vic03_init(struct nvhost_device *dev)
 
  clean_up:
        nvhost_err(&dev->dev, "failed");
-       mem_op().unpin(nvhost_get_host(dev)->memmgr, v->ucode.mem_r);
+       mem_op().unpin(nvhost_get_host(dev)->memmgr, v->ucode.mem_r,
+                      v->ucode.sgt);
        return /*err*/;
 
 
@@ -384,7 +387,7 @@ void nvhost_vic03_deinit(struct nvhost_device *dev)
        struct vic03 *v = get_vic03(dev);
        /* unpin, free ucode memory */
        if (v->ucode.mem_r) {
-               mem_op().unpin(v->host->memmgr, v->ucode.mem_r);
+               mem_op().unpin(v->host->memmgr, v->ucode.mem_r, v->ucode.sgt);
                mem_op().put(v->host->memmgr, v->ucode.mem_r);
                v->ucode.mem_r = 0;
        }
@@ -421,16 +424,16 @@ static struct nvhost_hwctx *vic03_alloc_hwctx(struct nvhost_hwctx_handler *h,
 
        ctx->restore = mem_op().alloc(nvmap,
                                   nvhost_vic03_restore_size * 4, 32,
-                                  map_restore ? NVMAP_HANDLE_WRITE_COMBINE
-                                  : NVMAP_HANDLE_UNCACHEABLE,
-                                  NVMAP_HEAP_CARVEOUT_GENERIC);
+                                  map_restore ? mem_mgr_flag_write_combine
+                                     : mem_mgr_flag_uncacheable,
+                                  0);
        if (IS_ERR_OR_NULL(ctx->restore))
-               goto fail;
+               goto fail_alloc;
 
        if (map_restore) {
                ctx->restore_virt = mem_op().mmap(ctx->restore);
-               if (!ctx->restore_virt)
-                       goto fail;
+               if (IS_ERR_OR_NULL(ctx->restore_virt))
+                       goto fail_mmap;
        } else
                ctx->restore_virt = NULL;
 
@@ -465,22 +468,22 @@ static struct nvhost_hwctx *vic03_alloc_hwctx(struct nvhost_hwctx_handler *h,
        ctx->save_thresh = 0;
        ctx->save_slots = 0;
 
-       ctx->restore_phys = mem_op().pin(nvmap, ctx->restore);
+       ctx->restore_sgt = mem_op().pin(nvmap, ctx->restore);
-       if (IS_ERR_VALUE(ctx->restore_phys))
+       if (IS_ERR_OR_NULL(ctx->restore_sgt))
-               goto fail;
+               goto fail_pin;
+       ctx->restore_phys = sg_dma_address(ctx->restore_sgt->sgl);
 
        ctx->restore_size = nvhost_vic03_restore_size;
        ctx->restore_incrs = 1;
 
        return &ctx->hwctx;
 
- fail:
-       if (map_restore && ctx->restore_virt) {
+ fail_pin:
+       if (map_restore)
                mem_op().munmap(ctx->restore, ctx->restore_virt);
-               ctx->restore_virt = NULL;
-       }
+ fail_mmap:
        mem_op().put(nvmap, ctx->restore);
-       ctx->restore = NULL;
+ fail_alloc:
        kfree(ctx);
        return NULL;
 }
@@ -496,7 +499,7 @@ static void vic03_free_hwctx(struct kref *ref)
                mem_op().munmap(ctx->restore, ctx->restore_virt);
                ctx->restore_virt = NULL;
        }
-       mem_op().unpin(nvmap, ctx->restore);
+       mem_op().unpin(nvmap, ctx->restore, ctx->restore_sgt);
        ctx->restore_phys = 0;
        mem_op().put(nvmap, ctx->restore);
        ctx->restore = NULL;
diff --git a/drivers/video/tegra/host/vic03/vic03.h b/drivers/video/tegra/host/vic03/vic03.h
index c0ef216..8323bee 100644
@@ -89,6 +89,7 @@ struct vic03 {
                        u32 size;
                } os, fce;
 
+               struct sg_table *sgt;
                phys_addr_t pa;
        } ucode;