video: tegra: host: Fix error handling
Terje Bergstrom [Fri, 23 Aug 2013 11:21:08 +0000 (14:21 +0300)]
Cleaned up error handling paths. Converts all IS_ERR_OR_NULL() instances
to the appropriate IS_ERR() check or an explicit check against NULL. Fixes
several calls to nvhost_memmgr_*() which treated the return value
incorrectly.

Bug 1212465

Change-Id: I764616d98d35b965335a9a2537fe3e2d555f0497
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/265476

14 files changed:
drivers/video/tegra/host/bus_client.c
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/clk_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/gk20a.c
drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/pmu_gk20a.c
drivers/video/tegra/host/msenc/msenc.c
drivers/video/tegra/host/nvhost_as.c
drivers/video/tegra/host/user_hwctx.c
drivers/video/tegra/host/vi/vi.c
drivers/video/tegra/host/vic03/vic03.c

index e10809b..58fb463 100644 (file)
@@ -513,7 +513,7 @@ static int nvhost_ioctl_channel_submit_gpfifo(
        size = args->num_entries * sizeof(struct nvhost_gpfifo);
 
        gpfifo = kzalloc(size, GFP_KERNEL);
-       if (IS_ERR_OR_NULL(gpfifo))
+       if (!gpfifo)
                return -ENOMEM;
 
        if (copy_from_user(gpfifo,
@@ -865,6 +865,9 @@ static int nvhost_ioctl_channel_set_ctxswitch(
                        save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
 
        nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
+       if (!nhwctx->memmgr)
+               goto fail_set_restore;
+
        err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
                        cmdbuf_restore.offset, cmdbuf_restore.words);
        if (err)
index 953875e..eef7755 100644 (file)
@@ -104,7 +104,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
        nvhost_dbg_fn("");
 
        inst_ptr = nvhost_memmgr_mmap(c->inst_block.mem.ref);
-       if (IS_ERR(inst_ptr))
+       if (!inst_ptr)
                return -ENOMEM;
 
        addr = sg_phys(c->vm->pdes.sgt->sgl);
@@ -144,7 +144,7 @@ static int channel_gk20a_commit_userd(struct channel_gk20a *c)
        nvhost_dbg_fn("");
 
        inst_ptr = nvhost_memmgr_mmap(c->inst_block.mem.ref);
-       if (IS_ERR(inst_ptr))
+       if (!inst_ptr)
                return -ENOMEM;
 
        addr_lo = u64_lo32(c->userd_cpu_pa >> ram_userd_base_shift_v());
@@ -176,7 +176,7 @@ static int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
        nvhost_dbg_fn("");
 
        inst_ptr = nvhost_memmgr_mmap(c->inst_block.mem.ref);
-       if (IS_ERR(inst_ptr))
+       if (!inst_ptr)
                return -ENOMEM;
 
        memset(inst_ptr, 0, ram_fc_size_val_v());
@@ -431,10 +431,10 @@ int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
                /* set up new cyclestats buffer */
                handle_ref = nvhost_memmgr_get(memmgr,
                                args->nvmap_handle, dev);
-               if (handle_ref == NULL)
-                       return -ENOMEM;
+               if (IS_ERR(handle_ref))
+                       return PTR_ERR(handle_ref);
                virtual_address = nvhost_memmgr_mmap(handle_ref);
-               if (IS_ERR(virtual_address))
+               if (!virtual_address)
                        return -ENOMEM;
 
                nvhost_memmgr_get_param(memmgr, handle_ref,
@@ -576,7 +576,7 @@ static void dump_gpfifo(struct channel_gk20a *c)
        nvhost_dbg_fn("");
 
        inst_ptr = nvhost_memmgr_mmap(c->inst_block.mem.ref);
-       if (IS_ERR(inst_ptr))
+       if (!inst_ptr)
                return;
 
        nvhost_dbg_info("ramfc for channel %d:\n"
@@ -661,7 +661,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
                                         DEFAULT_ALLOC_ALIGNMENT,
                                         DEFAULT_ALLOC_FLAGS,
                                         0);
-       if (IS_ERR_OR_NULL(q->mem.ref)) {
+       if (IS_ERR(q->mem.ref)) {
                nvhost_err(d, "ch %d : failed to allocate"
                           " priv cmd buffer(size: %d bytes)",
                           c->hw_chid, size);
@@ -670,7 +670,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        q->mem.size = size;
 
        q->base_ptr = (u32 *)nvhost_memmgr_mmap(q->mem.ref);
-       if (IS_ERR_OR_NULL(q->base_ptr)) {
+       if (!q->base_ptr) {
                nvhost_err(d, "ch %d : failed to map cpu va"
                           "for priv cmd buffer", c->hw_chid);
                goto clean_up;
@@ -963,18 +963,18 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
                                    DEFAULT_ALLOC_ALIGNMENT,
                                    DEFAULT_ALLOC_FLAGS,
                                    0);
-       if (IS_ERR_OR_NULL(c->gpfifo.mem.ref)) {
+       if (IS_ERR(c->gpfifo.mem.ref)) {
                nvhost_err(d, "channel %d :"
                           " failed to allocate gpfifo (size: %d bytes)",
                           c->hw_chid, gpfifo_size);
                c->gpfifo.mem.ref = 0;
-               return -ENOMEM;
+               return PTR_ERR(c->gpfifo.mem.ref);
        }
        c->gpfifo.entry_num = gpfifo_size;
 
        c->gpfifo.cpu_va =
                (struct gpfifo *)nvhost_memmgr_mmap(c->gpfifo.mem.ref);
-       if (IS_ERR_OR_NULL(c->gpfifo.cpu_va))
+       if (!c->gpfifo.cpu_va)
                goto clean_up;
 
        c->gpfifo.get = c->gpfifo.put = 0;
@@ -1080,6 +1080,7 @@ int gk20a_channel_submit_wfi_fence(struct gk20a *g,
        struct priv_cmd_entry *cmd = NULL;
        int cmd_size, j = 0;
        u32 free_count;
+       int err;
 
        cmd_size =  4 + wfi_cmd_size();
 
@@ -1091,11 +1092,11 @@ int gk20a_channel_submit_wfi_fence(struct gk20a *g,
                return -EAGAIN;
        }
 
-       alloc_priv_cmdbuf(c, cmd_size, &cmd);
-       if (unlikely(IS_ERR_OR_NULL(cmd))) {
+       err = alloc_priv_cmdbuf(c, cmd_size, &cmd);
+       if (unlikely(err)) {
                nvhost_err(dev_from_gk20a(g),
                           "not enough priv cmd buffer space");
-               return -EAGAIN;
+               return err;
        }
 
        fence->value = nvhost_syncpt_incr_max(sp, fence->syncpt_id, 1);
@@ -1582,14 +1583,14 @@ int gk20a_channel_wait(struct channel_gk20a *ch,
                offset = args->condition.notifier.offset;
 
                handle_ref = nvhost_memmgr_get(memmgr, id, dev);
-               if (!handle_ref) {
+               if (IS_ERR(handle_ref)) {
                        nvhost_err(d, "invalid notifier nvmap handle 0x%lx",
                                   id);
                        return -EINVAL;
                }
 
                notif = nvhost_memmgr_mmap(handle_ref);
-               if (IS_ERR_OR_NULL(notif)) {
+               if (!notif) {
                        nvhost_err(d, "failed to map notifier memory");
                        return -ENOMEM;
                }
index 953bfc4..ed760c5 100644 (file)
@@ -275,7 +275,7 @@ struct clk *gk20a_clk_get(struct gk20a *g)
                struct clk *clk;
 
                clk = clk_get_sys("tegra_gk20a", "PLLG_ref");
-               if (IS_ERR_OR_NULL(clk)) {
+               if (IS_ERR(clk)) {
                        nvhost_err(dev_from_gk20a(g),
                                "fail to get tegra ref clk tegra_gk20a/PLLG_ref");
                        return NULL;
index da0c552..9908ac6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * GK20A Graphics FIFO (gr host)
  *
- * Copyright (c) 2011, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -273,7 +273,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                                            DEFAULT_ALLOC_ALIGNMENT,
                                            DEFAULT_ALLOC_FLAGS,
                                            0);
-               if (!runlist->mem[i].ref)
+               if (IS_ERR(runlist->mem[i].ref))
                        goto clean_up_runlist;
                sgt = nvhost_memmgr_sg_table(memmgr, runlist->mem[i].ref);
                if (IS_ERR(sgt))
@@ -454,14 +454,14 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
                                               4096, /* 4K pages */
                                               DEFAULT_ALLOC_FLAGS,
                                               0);
-       if (IS_ERR_OR_NULL(f->userd.mem.ref)) {
-               err = -ENOMEM;
+       if (IS_ERR(f->userd.mem.ref)) {
+               err = PTR_ERR(f->userd.mem.ref);
                goto clean_up;
        }
 
        f->userd.cpu_va = nvhost_memmgr_mmap(f->userd.mem.ref);
        /* f->userd.cpu_va = g->bar1; */
-       if (IS_ERR_OR_NULL(f->userd.cpu_va)) {
+       if (!f->userd.cpu_va) {
                f->userd.cpu_va = NULL;
                err = -ENOMEM;
                goto clean_up;
@@ -634,12 +634,10 @@ static struct channel_gk20a *
 channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr)
 {
        int ci;
-       if (unlikely(IS_ERR_OR_NULL(f->channel)))
+       if (unlikely(!f->channel))
                return NULL;
        for (ci = 0; ci < f->num_channels; ci++) {
                struct channel_gk20a *c = f->channel+ci;
-               if (IS_ERR_OR_NULL(c))
-                       continue;
                if (c->inst_block.mem.ref &&
                    (inst_ptr == (u64)(sg_phys(c->inst_block.mem.sgt->sgl))))
                        return f->channel+ci;
@@ -807,8 +805,8 @@ static void gk20a_fifo_handle_mmu_fault(struct gk20a *g)
                gk20a_fifo_reset_engine(g, engine_id);
 
                fault_ch = channel_from_inst_ptr(&g->fifo, f.inst_ptr);
-               if (!IS_ERR_OR_NULL(fault_ch)) {
-                       if (!IS_ERR_OR_NULL(fault_ch->hwctx)) {
+               if (fault_ch) {
+                       if (fault_ch->hwctx) {
                                nvhost_dbg_fn("channel with hwctx has generated an mmu fault");
                                fault_ch->hwctx->has_timedout = true;
                        }
@@ -1232,7 +1230,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g,
        runlist_pa = sg_phys(runlist->mem[new_buf].sgt->sgl);
 
        runlist_entry_base = nvhost_memmgr_mmap(runlist->mem[new_buf].ref);
-       if (IS_ERR_OR_NULL(runlist_entry_base)) {
+       if (!runlist_entry_base) {
                ret = -ENOMEM;
                goto clean_up;
        }
index a94f48f..ec3ace3 100644 (file)
@@ -960,12 +960,6 @@ static int gk20a_probe(struct platform_device *dev)
        }
 
        gpu_cdev = &gk20a->gk20a_cdev;
-
-       if (IS_ERR_OR_NULL(gpu_cdev)) {
-               dev_err(&dev->dev, "error accessing gpu cooling device");
-               return -ENOMEM;
-       }
-
        gpu_cdev->gk20a_freq_table_size = tegra_gpufreq_table_size_get();
        gpu_cdev->gk20a_freq_state = 0;
        gpu_cdev->g = gk20a;
index 73a8255..707b545 100644 (file)
@@ -3,7 +3,7 @@
  *
  * GK20A Graphics Context
  *
- * Copyright (c) 2011, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -130,7 +130,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                }
 
                netlist_fw = nvhost_client_request_firmware(g->dev, name);
-               if (IS_ERR_OR_NULL(netlist_fw)) {
+               if (!netlist_fw) {
                        nvhost_warn(d, "failed to load netlist %s", name);
                        continue;
                }
index 233c25a..9769666 100644 (file)
@@ -430,7 +430,7 @@ static int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
        gk20a_mm_l2_flush(c->g, true);
 
        inst_ptr = nvhost_memmgr_mmap(c->inst_block.mem.ref);
-       if (IS_ERR(inst_ptr)) {
+       if (!inst_ptr) {
                ret = -ENOMEM;
                goto clean_up;
        }
@@ -470,7 +470,7 @@ static int gr_gk20a_ctx_patch_write(struct gk20a *g, struct channel_gk20a *c,
        if (patch) {
                ch_ctx = &c->ch_ctx;
                patch_ptr = nvhost_memmgr_mmap(ch_ctx->patch_ctx.mem.ref);
-               if (IS_ERR(patch_ptr))
+               if (!patch_ptr)
                        return -ENOMEM;
 
                patch_slot = ch_ctx->patch_ctx.data_count * 2;
@@ -525,7 +525,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
        nvhost_dbg_fn("");
 
        ctx_ptr = nvhost_memmgr_mmap(ch_ctx->gr_ctx.mem.ref);
-       if (IS_ERR(ctx_ptr))
+       if (!ctx_ptr)
                return -ENOMEM;
 
        if (ch_ctx->zcull_ctx.gpu_va == 0 &&
@@ -1193,11 +1193,11 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
                goto clean_up;
 
        gold_ptr = nvhost_memmgr_mmap(gr->global_ctx_buffer[GOLDEN_CTX].ref);
-       if (IS_ERR(gold_ptr))
+       if (!gold_ptr)
                goto clean_up;
 
        ctx_ptr = nvhost_memmgr_mmap(ch_ctx->gr_ctx.mem.ref);
-       if (IS_ERR(ctx_ptr))
+       if (!ctx_ptr)
                goto clean_up;
 
        ctx_header_words =  roundup(ctx_header_bytes, sizeof(u32));
@@ -1285,7 +1285,7 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
        gk20a_mm_l2_flush(g, true);
 
        ctx_ptr = nvhost_memmgr_mmap(ch_ctx->gr_ctx.mem.ref);
-       if (IS_ERR(ctx_ptr))
+       if (!ctx_ptr)
                return -ENOMEM;
 
        for (i = 0; i < gr->ctx_vars.golden_image_size / 4; i++)
@@ -1495,7 +1495,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  0);
-       if (IS_ERR_OR_NULL(mem))
+       if (IS_ERR(mem))
                goto clean_up;
 
        gr->global_ctx_buffer[CIRCULAR].ref = mem;
@@ -1505,7 +1505,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR_OR_NULL(mem)) {
+       if (!IS_ERR(mem)) {
                gr->global_ctx_buffer[CIRCULAR_VPR].ref = mem;
                gr->global_ctx_buffer[CIRCULAR_VPR].size = cb_buffer_size;
        }
@@ -1516,7 +1516,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  0);
-       if (IS_ERR_OR_NULL(mem))
+       if (IS_ERR(mem))
                goto clean_up;
 
        gr->global_ctx_buffer[PAGEPOOL].ref = mem;
@@ -1526,7 +1526,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR_OR_NULL(mem)) {
+       if (!IS_ERR(mem)) {
                gr->global_ctx_buffer[PAGEPOOL_VPR].ref = mem;
                gr->global_ctx_buffer[PAGEPOOL_VPR].size = pagepool_buffer_size;
        }
@@ -1537,7 +1537,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  0);
-       if (IS_ERR_OR_NULL(mem))
+       if (IS_ERR(mem))
                goto clean_up;
 
        gr->global_ctx_buffer[ATTRIBUTE].ref = mem;
@@ -1547,7 +1547,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR_OR_NULL(mem)) {
+       if (!IS_ERR(mem)) {
                gr->global_ctx_buffer[ATTRIBUTE_VPR].ref = mem;
                gr->global_ctx_buffer[ATTRIBUTE_VPR].size = attr_buffer_size;
        }
@@ -1559,7 +1559,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                  DEFAULT_ALLOC_ALIGNMENT,
                                  DEFAULT_ALLOC_FLAGS,
                                  0);
-       if (IS_ERR_OR_NULL(mem))
+       if (IS_ERR(mem))
                goto clean_up;
 
        gr->global_ctx_buffer[GOLDEN_CTX].ref = mem;
@@ -2215,7 +2215,7 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
                                                     DEFAULT_ALLOC_ALIGNMENT,
                                                     DEFAULT_ALLOC_FLAGS,
                                                     0);
-       if (!gr->mmu_wr_mem.mem.ref)
+       if (IS_ERR(gr->mmu_wr_mem.mem.ref))
                goto clean_up;
        gr->mmu_wr_mem.mem.size = gr->mmu_wr_mem_size;
 
@@ -2224,7 +2224,7 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
                                                     DEFAULT_ALLOC_ALIGNMENT,
                                                     DEFAULT_ALLOC_FLAGS,
                                                     0);
-       if (!gr->mmu_rd_mem.mem.ref)
+       if (IS_ERR(gr->mmu_rd_mem.mem.ref))
                goto clean_up;
        gr->mmu_rd_mem.mem.size = gr->mmu_rd_mem_size;
 
@@ -2242,12 +2242,12 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 
        gr->mmu_wr_mem.mem.sgt =
                nvhost_memmgr_sg_table(memmgr, gr->mmu_wr_mem.mem.ref);
-       if (IS_ERR_OR_NULL(gr->mmu_wr_mem.mem.sgt))
+       if (IS_ERR(gr->mmu_wr_mem.mem.sgt))
                goto clean_up;
 
        gr->mmu_rd_mem.mem.sgt =
                nvhost_memmgr_sg_table(memmgr, gr->mmu_rd_mem.mem.ref);
-       if (IS_ERR_OR_NULL(gr->mmu_rd_mem.mem.sgt))
+       if (IS_ERR(gr->mmu_rd_mem.mem.sgt))
                goto clean_up;
        return 0;
 
@@ -2497,18 +2497,18 @@ static int gr_gk20a_init_comptag(struct gk20a *g, struct gr_gk20a *gr)
                                    DEFAULT_ALLOC_ALIGNMENT,
                                    DEFAULT_ALLOC_FLAGS,
                                    0);
-       if (IS_ERR_OR_NULL(gr->compbit_store.mem.ref)) {
+       if (IS_ERR(gr->compbit_store.mem.ref)) {
                nvhost_err(dev_from_gk20a(g), "failed to allocate"
                           "backing store for compbit : size %d",
                           compbit_backing_size);
-               return -ENOMEM;
+               return PTR_ERR(gr->compbit_store.mem.ref);
        }
        gr->compbit_store.mem.size = compbit_backing_size;
 
        gr->compbit_store.mem.sgt =
                nvhost_memmgr_sg_table(memmgr, gr->compbit_store.mem.ref);
-       if (IS_ERR_OR_NULL(gr->compbit_store.mem.sgt)) {
-               ret = -ENOMEM;
+       if (IS_ERR(gr->compbit_store.mem.sgt)) {
+               ret = PTR_ERR(gr->compbit_store.mem.sgt);
                goto clean_up;
        }
 #ifdef CONFIG_TEGRA_IOMMU_SMMU
index 995435d..0fceb2e 100644 (file)
@@ -330,12 +330,12 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
                                DEFAULT_ALLOC_ALIGNMENT,
                                DEFAULT_ALLOC_FLAGS,
                                0);
-       if (IS_ERR_OR_NULL(r)) {
+       if (IS_ERR(r)) {
                nvhost_dbg(dbg_pte, "nvmap_alloc failed\n");
                goto err_out;
        }
        va = nvhost_memmgr_mmap(r);
-       if (IS_ERR_OR_NULL(va)) {
+       if (!va) {
                nvhost_dbg(dbg_pte, "nvmap_mmap failed\n");
                goto err_alloced;
        }
@@ -376,7 +376,7 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt, void **va)
        nvhost_dbg_fn("");
 
        tmp_va = nvhost_memmgr_mmap(r);
-       if (IS_ERR_OR_NULL(tmp_va))
+       if (!tmp_va)
                goto err_out;
 
        *va = tmp_va;
@@ -900,7 +900,7 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
 
        /* pin buffer to get phys/iovmm addr */
        bfr.sgt = nvhost_memmgr_pin(memmgr, r, d);
-       if (IS_ERR_OR_NULL(bfr.sgt)) {
+       if (IS_ERR(bfr.sgt)) {
                /* Falling back to physical is actually possible
                 * here in many cases if we use 4K phys pages in the
                 * gmmu.  However we have some regions which require
@@ -1875,7 +1875,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        inst_block->mem.sgt = nvhost_memmgr_sg_table(nvmap,
                        inst_block->mem.ref);
        /* IS_ERR throws a warning here (expecting void *) */
-       if (IS_ERR_OR_NULL(inst_block->mem.sgt)) {
+       if (IS_ERR(inst_block->mem.sgt)) {
                inst_pa = 0;
                err = (int)inst_pa;
                goto clean_up;
@@ -2030,7 +2030,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        inst_block->mem.sgt = nvhost_memmgr_sg_table(nvmap,
                        inst_block->mem.ref);
        /* IS_ERR throws a warning here (expecting void *) */
-       if (IS_ERR_OR_NULL(inst_block->mem.sgt)) {
+       if (IS_ERR(inst_block->mem.sgt)) {
                inst_pa = 0;
                err = (int)((uintptr_t)inst_block->mem.sgt);
                goto clean_up;
index 4540755..b34512e 100644 (file)
@@ -331,7 +331,7 @@ static int pmu_bootstrap(struct pmu_gk20a *pmu)
        nvhost_dbg_fn("");
 
        ucode_ptr = nvhost_memmgr_mmap(pmu->ucode.mem.ref);
-       if (IS_ERR_OR_NULL(ucode_ptr)) {
+       if (!ucode_ptr) {
                nvhost_err(dev_from_gk20a(g),
                        "fail to map pmu ucode memory");
                return -ENOMEM;
@@ -1016,7 +1016,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        if (!g->pmu_fw) {
                g->pmu_fw = nvhost_client_request_firmware(g->dev,
                                        GK20A_PMU_UCODE_IMAGE);
-               if (IS_ERR_OR_NULL(g->pmu_fw)) {
+               if (!g->pmu_fw) {
                        nvhost_err(d, "failed to load pmu ucode!!");
                        err = -ENOENT;
                        return err;
@@ -1036,8 +1036,8 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                                 DEFAULT_ALLOC_ALIGNMENT,
                                                 DEFAULT_ALLOC_FLAGS,
                                                 0);
-       if (IS_ERR_OR_NULL(pmu->ucode.mem.ref)) {
-               err = -ENOMEM;
+       if (IS_ERR(pmu->ucode.mem.ref)) {
+               err = PTR_ERR(pmu->ucode.mem.ref);
                goto clean_up;
        }
 
@@ -1064,10 +1064,10 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                                  DEFAULT_ALLOC_ALIGNMENT,
                                                  DEFAULT_ALLOC_FLAGS,
                                                  0);
-       if (IS_ERR_OR_NULL(pmu->pg_buf.mem.ref)) {
+       if (IS_ERR(pmu->pg_buf.mem.ref)) {
                nvhost_err(dev_from_gk20a(g),
                        "fail to allocate fecs pg buffer");
-               err = -ENOMEM;
+               err = PTR_ERR(pmu->pg_buf.mem.ref);
                goto clean_up;
        }
        pmu->pg_buf.mem.size = size;
@@ -1085,10 +1085,10 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                                   DEFAULT_ALLOC_ALIGNMENT,
                                                   DEFAULT_ALLOC_FLAGS,
                                                   0);
-       if (IS_ERR_OR_NULL(pmu->seq_buf.mem.ref)) {
+       if (IS_ERR(pmu->seq_buf.mem.ref)) {
                nvhost_err(dev_from_gk20a(g),
                        "fail to allocate zbc buffer");
-               err = -ENOMEM;
+               err = PTR_ERR(pmu->seq_buf.mem.ref);
                goto clean_up;
        }
 
@@ -1102,7 +1102,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        }
 
        ptr = (u8 *)nvhost_memmgr_mmap(pmu->seq_buf.mem.ref);
-       if (IS_ERR_OR_NULL(ptr)) {
+       if (!ptr) {
                nvhost_err(d, "failed to map cpu ptr for zbc buffer");
                goto clean_up;
        }
index d8d5ad1..a2692a4 100644 (file)
@@ -295,7 +295,7 @@ int msenc_read_ucode(struct platform_device *dev, const char *fw_name)
        }
 
        m->mapped = nvhost_memmgr_mmap(m->mem_r);
-       if (IS_ERR_OR_NULL(m->mapped)) {
+       if (!m->mapped) {
                dev_err(&dev->dev, "nvmap mmap failed");
                err = -ENOMEM;
                goto clean_up;
index 7538c14..e40b287 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Tegra Host Address Spaces
  *
- * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -411,8 +411,8 @@ int nvhost_as_ioctl_map_buffer(struct nvhost_as_share *as_share,
        }
 
        r = nvhost_memmgr_get(memmgr, args->nvmap_handle, /*XXX:get device*/0);
-       if (!r) {
-               err = -EINVAL;
+       if (IS_ERR(r)) {
+               err = PTR_ERR(r);
                goto finish;
        }
 
index e7aa2d1..f7f370f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Tegra Graphics Host Hardware Context Interface
  *
- * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (c) 2013, NVIDIA Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -117,13 +117,13 @@ int user_hwctx_set_save(struct user_hwctx *ctx,
 
        buf = nvhost_memmgr_get(ctx->hwctx.memmgr,
                        mem, ctx->hwctx.channel->dev);
-       if (IS_ERR_OR_NULL(buf))
-               return -ENOMEM;
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
 
        sgt = nvhost_memmgr_pin(ctx->hwctx.memmgr, buf,
                        &ctx->hwctx.channel->dev->dev);
-       if (IS_ERR_OR_NULL(sgt))
-               return -ENOMEM;
+       if (IS_ERR(sgt))
+               return PTR_ERR(sgt);
 
        ctx->save_offset = offset;
        ctx->save_size = words;
@@ -153,13 +153,13 @@ int user_hwctx_set_restore(struct user_hwctx *ctx,
 
        buf = nvhost_memmgr_get(ctx->hwctx.memmgr,
                        mem, ctx->hwctx.channel->dev);
-       if (IS_ERR_OR_NULL(buf))
-               return -ENOMEM;
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
 
        sgt = nvhost_memmgr_pin(ctx->hwctx.memmgr, buf,
                        &ctx->hwctx.channel->dev->dev);
-       if (IS_ERR_OR_NULL(sgt))
-               return -ENOMEM;
+       if (IS_ERR(sgt))
+               return PTR_ERR(sgt);
 
        ctx->restore_offset = offset;
        ctx->restore_size = words;
index 074372a..3ddafb6 100644 (file)
@@ -309,7 +309,7 @@ int nvhost_vi_init(struct platform_device *dev)
        tegra_vi = (struct vi *)nvhost_get_private_data(dev);
 
        tegra_vi->reg = regulator_get(&dev->dev, "avdd_dsi_csi");
-       if (IS_ERR_OR_NULL(tegra_vi->reg)) {
+       if (IS_ERR(tegra_vi->reg)) {
                if (tegra_vi->reg == ERR_PTR(-ENODEV)) {
                        ret = -ENODEV;
                        dev_info(&dev->dev,
index 66f4182..98cfe22 100644 (file)
@@ -250,7 +250,7 @@ static int vic03_read_ucode(struct platform_device *dev)
        int err;
 
        ucode_fw = nvhost_client_request_firmware(dev, VIC03_UCODE_FW_NAME);
-       if (IS_ERR_OR_NULL(ucode_fw)) {
+       if (!ucode_fw) {
                nvhost_dbg_fn("request firmware failed");
                dev_err(&dev->dev, "failed to get vic03 firmware\n");
                err = -ENOENT;
@@ -477,12 +477,12 @@ static struct nvhost_hwctx *vic03_alloc_hwctx(struct nvhost_hwctx_handler *h,
                                                mem_mgr_flag_write_combine
                                              : mem_mgr_flag_uncacheable,
                                           0);
-       if (IS_ERR_OR_NULL(ctx->restore))
+       if (IS_ERR(ctx->restore))
                goto fail_alloc;
 
        if (map_restore) {
                ctx->restore_virt = nvhost_memmgr_mmap(ctx->restore);
-               if (IS_ERR_OR_NULL(ctx->restore_virt))
+               if (!ctx->restore_virt)
                        goto fail_mmap;
        } else
                ctx->restore_virt = NULL;
@@ -520,7 +520,7 @@ static struct nvhost_hwctx *vic03_alloc_hwctx(struct nvhost_hwctx_handler *h,
 
        ctx->restore_sgt = nvhost_memmgr_pin(nvmap,
                        ctx->restore, &ch->dev->dev);
-       if (IS_ERR_VALUE(ctx->restore_phys))
+       if (IS_ERR(ctx->restore_sgt))
                goto fail_pin;
        ctx->restore_phys = sg_dma_address(ctx->restore_sgt->sgl);