gpu: nvgpu: add ptr validation for vm_map_buffer
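
dma_buf_get() reports failure through ERR_PTR() rather than by returning
NULL, so the existing NULL check in gk20a_vm_map_buffer() can never catch
a bad or stale fd and the error pointer would be treated as a valid buffer
handle further down the map path. Validate the pointer with IS_ERR() and
propagate the encoded error before the handle is used.

A minimal sketch of the pattern the hunk in gk20a_vm_map_buffer() below
applies (the specific errno values are those dma_buf_get() is expected to
encode, e.g. -EBADF for a bad fd, -EINVAL for a non-dmabuf fd):

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		/* hand the encoded errno back to the ioctl caller */
		return PTR_ERR(dmabuf);
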
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 210fe1b..12f3c09 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -3,7 +3,7 @@
  *
  * GK20A memory management
  *
- * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2017, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -100,6 +100,7 @@ static inline u32 lo32(u64 f)
        } while (0)
 
 static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer);
+void __gk20a_mm_tlb_invalidate(struct vm_gk20a *vm);
 static struct mapped_buffer_node *find_mapped_buffer_locked(
                                        struct rb_root *root, u64 addr);
 static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
@@ -107,7 +108,7 @@ static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
                                u32 kind);
 static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                                   enum gmmu_pgsz_gk20a pgsz_idx,
-                                  struct sg_table *sgt,
+                                  struct sg_table *sgt, u64 buffer_offset,
                                   u64 first_vaddr, u64 last_vaddr,
                                   u8 kind_v, u32 ctag_offset, bool cacheable,
                                   int rw_flag);
@@ -298,7 +299,6 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
        }
 
        mm->g = g;
-       mutex_init(&mm->tlb_lock);
        mutex_init(&mm->l2_op_lock);
        mm->big_page_size = gmmu_page_sizes[gmmu_page_size_big];
        mm->compression_page_size = gmmu_page_sizes[gmmu_page_size_big];
@@ -1056,7 +1056,7 @@ static int setup_buffer_kind_and_compression(struct device *d,
 
 static int validate_fixed_buffer(struct vm_gk20a *vm,
                                 struct buffer_attrs *bfr,
-                                u64 map_offset)
+                                u64 map_offset, u64 map_size)
 {
        struct device *dev = dev_from_vm(vm);
        struct vm_reserved_va_node *va_node;
@@ -1083,7 +1083,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
                &va_node->va_buffers_list, va_buffers_list) {
                s64 begin = max(buffer->addr, map_offset);
                s64 end = min(buffer->addr +
-                       buffer->size, map_offset + bfr->size);
+                       buffer->size, map_offset + map_size);
                if (end - begin > 0) {
                        gk20a_warn(dev, "overlapping buffer map requested");
                        return -EINVAL;
@@ -1096,6 +1096,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
 static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                                u64 map_offset,
                                struct sg_table *sgt,
+                               u64 buffer_offset,
                                u64 size,
                                int pgsz_idx,
                                u8 kind_v,
@@ -1104,6 +1105,7 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                                int rw_flag)
 {
        int err = 0, i = 0;
+       bool allocated = false;
        u32 pde_lo, pde_hi;
        struct device *d = dev_from_vm(vm);
 
@@ -1114,8 +1116,9 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                if (!map_offset) {
                        gk20a_err(d, "failed to allocate va space");
                        err = -ENOMEM;
-                       goto fail;
+                       goto fail_alloc;
                }
+               allocated = true;
        }
 
        pde_range_from_vaddr_range(vm,
@@ -1130,12 +1133,13 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                if (err) {
                        gk20a_err(d, "failed to validate page table %d: %d",
                                                           i, err);
-                       goto fail;
+                       goto fail_validate;
                }
        }
 
        err = update_gmmu_ptes_locked(vm, pgsz_idx,
                                      sgt,
+                                     buffer_offset,
                                      map_offset, map_offset + size - 1,
                                      kind_v,
                                      ctag_offset,
@@ -1144,11 +1148,14 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                                      rw_flag);
        if (err) {
                gk20a_err(d, "failed to update ptes on map");
-               goto fail;
+               goto fail_validate;
        }
 
        return map_offset;
- fail:
+fail_validate:
+       if (allocated)
+               gk20a_vm_free_va(vm, map_offset, size, pgsz_idx);
+fail_alloc:
        gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
        return 0;
 }
@@ -1176,6 +1183,7 @@ static void __locked_gmmu_unmap(struct vm_gk20a *vm,
        err = update_gmmu_ptes_locked(vm,
                                pgsz_idx,
                                0, /* n/a for unmap */
+                               0,
                                vaddr,
                                vaddr + size - 1,
                                0, 0, false /* n/a for unmap */,
@@ -1268,7 +1276,9 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
                        int kind,
                        struct sg_table **sgt,
                        bool user_mapped,
-                       int rw_flag)
+                       int rw_flag,
+                       u64 buffer_offset,
+                       u64 mapping_size)
 {
        struct gk20a *g = gk20a_from_vm(vm);
        struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags;
@@ -1280,6 +1290,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        int err = 0;
        struct buffer_attrs bfr = {0};
        struct gk20a_comptags comptags;
+       u64 buf_addr;
 
        mutex_lock(&vm->update_gmmu_lock);
 
@@ -1312,8 +1323,12 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
        bfr.kind_v = kind;
        bfr.size = dmabuf->size;
-       bfr.align = 1 << __ffs((u64)sg_dma_address(bfr.sgt->sgl));
+       buf_addr = (u64)sg_dma_address(bfr.sgt->sgl);
+       if (unlikely(!buf_addr))
+               buf_addr = (u64)sg_phys(bfr.sgt->sgl);
+       bfr.align = 1 << __ffs(buf_addr);
        bfr.pgsz_idx = -1;
+       mapping_size = mapping_size ? mapping_size : bfr.size;
 
        /* If FIX_OFFSET is set, pgsz is determined. Otherwise, select
         * page size according to memory alignment */
@@ -1342,8 +1357,10 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        gmmu_page_size = gmmu_page_sizes[bfr.pgsz_idx];
 
        /* Check if we should use a fixed offset for mapping this buffer */
+
        if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)  {
-               err = validate_fixed_buffer(vm, &bfr, offset_align);
+               err = validate_fixed_buffer(vm, &bfr,
+                       offset_align, mapping_size);
                if (err)
                        goto clean_up;
 
@@ -1392,11 +1409,13 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        /* update gmmu ptes */
        map_offset = __locked_gmmu_map(vm, map_offset,
                                        bfr.sgt,
-                                       bfr.size,
+                                       buffer_offset, /* sg offset */
+                                       mapping_size,
                                        bfr.pgsz_idx,
                                        bfr.kind_v,
                                        bfr.ctag_offset,
                                        flags, rw_flag);
+
        if (!map_offset)
                goto clean_up;
 
@@ -1439,7 +1458,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        mapped_buffer->dmabuf      = dmabuf;
        mapped_buffer->sgt         = bfr.sgt;
        mapped_buffer->addr        = map_offset;
-       mapped_buffer->size        = bfr.size;
+       mapped_buffer->size        = mapping_size;
        mapped_buffer->pgsz_idx    = bfr.pgsz_idx;
        mapped_buffer->ctag_offset = bfr.ctag_offset;
        mapped_buffer->ctag_lines  = bfr.ctag_lines;
@@ -1510,6 +1529,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
        mutex_lock(&vm->update_gmmu_lock);
        vaddr = __locked_gmmu_map(vm, 0, /* already mapped? - No */
                                *sgt, /* sg table */
+                               0, /* sg offset */
                                size,
                                0, /* page size index = 0 i.e. SZ_4K */
                                0, /* kind */
@@ -1639,6 +1659,7 @@ u64 gk20a_mm_iova_addr(struct scatterlist *sgl)
 static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                                   enum gmmu_pgsz_gk20a pgsz_idx,
                                   struct sg_table *sgt,
+                                  u64 buffer_offset,
                                   u64 first_vaddr, u64 last_vaddr,
                                   u8 kind_v, u32 ctag_offset,
                                   bool cacheable,
@@ -1653,6 +1674,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
        u32 ctag_incr;
        u32 page_size  = gmmu_page_sizes[pgsz_idx];
        u64 addr = 0;
+       u64 space_to_skip = buffer_offset;
+       bool set_tlb_dirty = false;
 
        pde_range_from_vaddr_range(vm, first_vaddr, last_vaddr,
                                   &pde_lo, &pde_hi);
@@ -1665,13 +1688,31 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
         * comptags are active) is 128KB. We have checks elsewhere for that. */
        ctag_incr = !!ctag_offset;
 
-       if (sgt)
+       cur_offset = 0;
+       if (sgt) {
                cur_chunk = sgt->sgl;
+               /* space_to_skip must be page aligned */
+               BUG_ON(space_to_skip & (page_size - 1));
+
+               while (space_to_skip > 0 && cur_chunk) {
+                       u64 new_addr = gk20a_mm_iova_addr(cur_chunk);
+                       if (new_addr) {
+                               addr = new_addr;
+                               addr += cur_offset;
+                       }
+                       cur_offset += page_size;
+                       addr += page_size;
+                       while (cur_chunk &&
+                               cur_offset >= cur_chunk->length) {
+                               cur_offset -= cur_chunk->length;
+                               cur_chunk = sg_next(cur_chunk);
+                       }
+                       space_to_skip -= page_size;
+               }
+       }
        else
                cur_chunk = NULL;
 
-       cur_offset = 0;
-
        for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) {
                u32 pte_lo, pte_hi;
                u32 pte_cur;
@@ -1679,6 +1720,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 
                struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i;
 
+               set_tlb_dirty = true;
+
                if (pde_i == pde_lo)
                        pte_lo = pte_index_from_vaddr(vm, first_vaddr,
                                                      pgsz_idx);
@@ -1703,14 +1746,12 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 
                gk20a_dbg(gpu_dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
                for (pte_cur = pte_lo; pte_cur <= pte_hi; pte_cur++) {
-
                        if (likely(sgt)) {
                                u64 new_addr = gk20a_mm_iova_addr(cur_chunk);
                                if (new_addr) {
                                        addr = new_addr;
                                        addr += cur_offset;
                                }
-
                                pte_w[0] = gmmu_pte_valid_true_f() |
                                        gmmu_pte_address_sys_f(addr
                                                >> gmmu_pte_address_shift_v());
@@ -1727,20 +1768,16 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                                        pte_w[1] |=
                                                gmmu_pte_read_disable_true_f();
                                }
-
                                if (!cacheable)
                                        pte_w[1] |= gmmu_pte_vol_true_f();
 
                                pte->ref_cnt++;
-
-                               gk20a_dbg(gpu_dbg_pte,
-                                          "pte_cur=%d addr=0x%x,%08x kind=%d"
+                               gk20a_dbg(gpu_dbg_pte, "pte_cur=%d addr=0x%x,%08x kind=%d"
                                           " ctag=%d vol=%d refs=%d"
                                           " [0x%08x,0x%08x]",
                                           pte_cur, hi32(addr), lo32(addr),
                                           kind_v, ctag, !cacheable,
                                           pte->ref_cnt, pte_w[1], pte_w[0]);
-
                                ctag += ctag_incr;
                                cur_offset += page_size;
                                addr += page_size;
@@ -1764,6 +1801,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                unmap_gmmu_pages(pte->ref, pte->sgt, pte_kv_cur);
 
                if (pte->ref_cnt == 0) {
+                       void *pte_ref_ptr = pte->ref;
+
                        /* It can make sense to keep around one page table for
                         * each flavor (empty)... in case a new map is coming
                         * right back to alloc (and fill it in) again.
@@ -1771,20 +1810,27 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                         * unmap/map/unmap/map cases where we'd trigger pte
                         * free/alloc/free/alloc.
                         */
-                       free_gmmu_pages(vm, pte->ref, pte->sgt,
-                               vm->mm->page_table_sizing[pgsz_idx].order,
-                               pte->size);
                        pte->ref = NULL;
 
                        /* rewrite pde */
                        update_gmmu_pde_locked(vm, pde_i);
+
+                       __gk20a_mm_tlb_invalidate(vm);
+                       set_tlb_dirty = false;
+
+                       free_gmmu_pages(vm, pte_ref_ptr, pte->sgt,
+                               vm->mm->page_table_sizing[pgsz_idx].order,
+                               pte->size);
+
                }
 
        }
 
        smp_mb();
-       vm->tlb_dirty = true;
-       gk20a_dbg_fn("set tlb dirty");
+       if (set_tlb_dirty) {
+               vm->tlb_dirty = true;
+               gk20a_dbg_fn("set tlb dirty");
+       }
 
        return 0;
 
@@ -1916,7 +1962,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
 
        for (i = 0; i < num_pages; i++) {
                u64 page_vaddr = __locked_gmmu_map(vm, vaddr,
-                       vm->zero_page_sgt, pgsz, pgsz_idx, 0, 0,
+                       vm->zero_page_sgt, 0, pgsz, pgsz_idx, 0, 0,
                        NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
                        gk20a_mem_flag_none);
 
@@ -1927,8 +1973,6 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
                vaddr += pgsz;
        }
 
-       gk20a_mm_l2_flush(mm->g, true);
-
        return 0;
 
 err_unmap:
@@ -2004,6 +2048,7 @@ void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
                gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
                return;
        }
+
        kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
        mutex_unlock(&vm->update_gmmu_lock);
 }
@@ -2014,6 +2059,7 @@ static void gk20a_vm_remove_support(struct vm_gk20a *vm)
        struct mapped_buffer_node *mapped_buffer;
        struct vm_reserved_va_node *va_node, *va_node_tmp;
        struct rb_node *node;
+       int i;
 
        gk20a_dbg_fn("");
        mutex_lock(&vm->update_gmmu_lock);
@@ -2036,8 +2082,25 @@ static void gk20a_vm_remove_support(struct vm_gk20a *vm)
                kfree(va_node);
        }
 
-       /* TBD: unmapping all buffers above may not actually free
+       /* unmapping all buffers above may not actually free
         * all vm ptes.  jettison them here for certain... */
+       for (i = 0; i < vm->pdes.num_pdes; i++) {
+               struct page_table_gk20a *pte =
+                       &vm->pdes.ptes[gmmu_page_size_small][i];
+               if (pte->ref) {
+                       free_gmmu_pages(vm, pte->ref, pte->sgt,
+                               vm->mm->page_table_sizing[gmmu_page_size_small].order,
+                               pte->size);
+                       pte->ref = NULL;
+               }
+               pte = &vm->pdes.ptes[gmmu_page_size_big][i];
+               if (pte->ref) {
+                       free_gmmu_pages(vm, pte->ref, pte->sgt,
+                               vm->mm->page_table_sizing[gmmu_page_size_big].order,
+                               pte->size);
+                       pte->ref = NULL;
+               }
+       }
 
        unmap_gmmu_pages(vm->pdes.ref, vm->pdes.sgt, vm->pdes.kv);
        free_gmmu_pages(vm, vm->pdes.ref, vm->pdes.sgt, 0, vm->pdes.size);
@@ -2275,7 +2338,6 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 
                va_node->sparse = true;
        }
-
        list_add_tail(&va_node->reserved_va_list, &vm->reserved_va_list);
 
        mutex_unlock(&vm->update_gmmu_lock);
@@ -2414,7 +2476,9 @@ int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
                        int dmabuf_fd,
                        u64 *offset_align,
                        u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
-                       int kind)
+                       int kind,
+                       u64 buffer_offset,
+                       u64 mapping_size)
 {
        int err = 0;
        struct vm_gk20a *vm = as_share->vm;
@@ -2425,6 +2489,9 @@ int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
 
        /* get ref to the mem handle (released on unmap_locked) */
        dmabuf = dma_buf_get(dmabuf_fd);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
        if (!dmabuf)
                return 0;
 
@@ -2439,7 +2506,10 @@ int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
 
        ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
                        flags, kind, NULL, true,
-                       gk20a_mem_flag_none);
+                       gk20a_mem_flag_none,
+                       buffer_offset,
+                       mapping_size);
+
        *offset_align = ret_va;
        if (!ret_va) {
                dma_buf_put(dmabuf);
@@ -2754,8 +2824,6 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 
        mutex_lock(&mm->l2_op_lock);
 
-       g->ops.ltc.elpg_flush(g);
-
        /* Make sure all previous writes are committed to the L2. There's no
           guarantee that writes are to DRAM. This will be a sysmembar internal
           to the L2. */
@@ -2890,35 +2958,20 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
        return 0;
 }
 
-void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
+void __gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 {
-       struct mm_gk20a *mm = vm->mm;
        struct gk20a *g = gk20a_from_vm(vm);
        u32 addr_lo = u64_lo32(gk20a_mm_iova_addr(vm->pdes.sgt->sgl) >> 12);
        u32 data;
        s32 retry = 200;
+       static DEFINE_MUTEX(tlb_lock);
 
        gk20a_dbg_fn("");
 
-       /* pagetables are considered sw states which are preserved after
-          prepare_poweroff. When gk20a deinit releases those pagetables,
-          common code in vm unmap path calls tlb invalidate that touches
-          hw. Use the power_on flag to skip tlb invalidation when gpu
-          power is turned off */
-
        if (!g->power_on)
                return;
 
-       /* No need to invalidate if tlb is clean */
-       mutex_lock(&vm->update_gmmu_lock);
-       if (!vm->tlb_dirty) {
-               mutex_unlock(&vm->update_gmmu_lock);
-               return;
-       }
-       vm->tlb_dirty = false;
-       mutex_unlock(&vm->update_gmmu_lock);
-
-       mutex_lock(&mm->tlb_lock);
+       mutex_lock(&tlb_lock);
        do {
                data = gk20a_readl(g, fb_mmu_ctrl_r());
                if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0)
@@ -2927,17 +2980,17 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
                retry--;
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
-       if (retry < 0)
+       if (retry < 0) {
                gk20a_warn(dev_from_gk20a(g),
                        "wait mmu fifo space too many retries");
+               goto out;
+       }
 
        gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
                fb_mmu_invalidate_pdb_addr_f(addr_lo) |
                fb_mmu_invalidate_pdb_aperture_vid_mem_f());
 
-       /* this is a sledgehammer, it would seem */
        gk20a_writel(g, fb_mmu_invalidate_r(),
-               fb_mmu_invalidate_all_pdb_true_f() |
                fb_mmu_invalidate_all_va_true_f() |
                fb_mmu_invalidate_trigger_true_f());
 
@@ -2954,15 +3007,42 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
                gk20a_warn(dev_from_gk20a(g),
                        "mmu invalidate too many retries");
 
-       mutex_unlock(&mm->tlb_lock);
+out:
+       mutex_unlock(&tlb_lock);
+}
+
+void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
+{
+       struct gk20a *g = gk20a_from_vm(vm);
+
+       gk20a_dbg_fn("");
+
+       /* pagetables are considered sw states which are preserved after
+          prepare_poweroff. When gk20a deinit releases those pagetables,
+          common code in vm unmap path calls tlb invalidate that touches
+          hw. Use the power_on flag to skip tlb invalidation when gpu
+          power is turned off */
+
+       if (!g->power_on)
+               return;
+
+       /* No need to invalidate if tlb is clean */
+       mutex_lock(&vm->update_gmmu_lock);
+       if (!vm->tlb_dirty) {
+               mutex_unlock(&vm->update_gmmu_lock);
+               return;
+       }
+       vm->tlb_dirty = false;
+       mutex_unlock(&vm->update_gmmu_lock);
+
+       __gk20a_mm_tlb_invalidate(vm);
 }
 
 int gk20a_mm_suspend(struct gk20a *g)
 {
        gk20a_dbg_fn("");
 
-       gk20a_mm_fb_flush(g);
-       gk20a_mm_l2_flush(g, true);
+       g->ops.ltc.elpg_flush(g);
 
        gk20a_dbg_fn("done");
        return 0;
@@ -2983,48 +3063,3 @@ bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g)
        return fb_mmu_debug_ctrl_debug_v(debug_ctrl) ==
                fb_mmu_debug_ctrl_debug_enabled_v();
 }
-
-static int gk20a_mm_mmu_vpr_info_fetch_wait(struct gk20a *g,
-                                           const unsigned int msec)
-{
-       unsigned long timeout;
-
-       timeout = jiffies + msecs_to_jiffies(msec);
-       while (1) {
-               u32 val;
-
-               val = gk20a_readl(g, fb_mmu_vpr_info_r());
-               if (fb_mmu_vpr_info_fetch_v(val) ==
-                   fb_mmu_vpr_info_fetch_false_v())
-                       break;
-
-               if (tegra_platform_is_silicon() &&
-                               WARN_ON(time_after(jiffies, timeout)))
-                       return -ETIME;
-       }
-
-       return 0;
-}
-
-int gk20a_mm_mmu_vpr_info_fetch(struct gk20a *g)
-{
-       int ret = 0;
-
-       gk20a_busy_noresume(g->dev);
-       if (!pm_runtime_active(&g->dev->dev))
-               goto fail;
-
-       if (gk20a_mm_mmu_vpr_info_fetch_wait(g, 5)) {
-               ret = -ETIME;
-               goto fail;
-       }
-
-       gk20a_writel(g, fb_mmu_vpr_info_r(),
-                    fb_mmu_vpr_info_fetch_true_v());
-
-       ret = gk20a_mm_mmu_vpr_info_fetch_wait(g, 5);
-
- fail:
-       gk20a_idle(g->dev);
-       return ret;
-}