video: tegra: nvmap: Add support for zeroed pages
Alex Waterman [Wed, 1 Aug 2012 18:02:30 +0000 (11:02 -0700)]
Add support to allocate zeroed pages for user-space allocation requests.
Also define a config option to force user-space allocation requests to be
zeroed.

Change-Id: I75d3b2bc36e808f1470b423578ec4cba99e0f967
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/122549
Reviewed-by: Simone Willett <swillett@nvidia.com>
Tested-by: Simone Willett <swillett@nvidia.com>

drivers/video/tegra/Kconfig
drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_ioctl.c
include/linux/nvmap.h

index b5540a5..650c418 100644 (file)
@@ -135,6 +135,15 @@ config NVMAP_VPR
          Say Y here to enable Video Protection Region(VPR) heap.
          if unsure, say N.
 
+config NVMAP_FORCE_ZEROED_USER_PAGES
+       bool "Only alloc zeroed pages for user space"
+       depends on TEGRA_NVMAP
+       help
+         Say Y here to force zeroing of pages allocated for user space. This
+         avoids leaking kernel secure data to user space. This can add
+         significant overhead to allocation operations depending on the
+         allocation size requested.
+
 config TEGRA_DSI
        bool "Enable DSI panel."
        default n
index f12c058..df54f1a 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
+#include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
 #include <mach/iovmm.h>
@@ -555,6 +556,17 @@ static int handle_page_alloc(struct nvmap_client *client,
        struct nvmap_page_pool *pool = NULL;
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
 #endif
+       gfp_t gfp = GFP_NVMAP;
+       unsigned long kaddr, paddr;
+       pte_t **pte = NULL;
+
+       if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) {
+               gfp |= __GFP_ZERO;
+               prot = nvmap_pgprot(h, pgprot_kernel);
+               pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+               if (IS_ERR(pte))
+                       return -ENOMEM;
+       }
 
        pages = altalloc(nr_page * sizeof(*pages));
        if (!pages)
@@ -565,7 +577,7 @@ static int handle_page_alloc(struct nvmap_client *client,
        h->pgalloc.area = NULL;
        if (contiguous) {
                struct page *page;
-               page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
+               page = nvmap_alloc_pages_exact(gfp, size);
                if (!page)
                        goto fail;
 
@@ -582,12 +594,29 @@ static int handle_page_alloc(struct nvmap_client *client,
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
+                       if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) {
+                               /*
+                                * Just memset low mem pages; they will for
+                                * sure have a virtual address. Otherwise, build
+                                * a mapping for the page in the kernel.
+                                */
+                               if (!PageHighMem(pages[i])) {
+                                       memset(page_address(pages[i]), 0,
+                                              PAGE_SIZE);
+                               } else {
+                                       paddr = page_to_phys(pages[i]);
+                                       set_pte_at(&init_mm, kaddr, *pte,
+                                                  pfn_pte(__phys_to_pfn(paddr),
+                                                          prot));
+                                       flush_tlb_kernel_page(kaddr);
+                                       memset((char *)kaddr, 0, PAGE_SIZE);
+                               }
+                       }
                        page_index++;
                }
 #endif
                for (; i < nr_page; i++) {
-                       pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
-                               PAGE_SIZE);
+                       pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }
@@ -618,6 +647,8 @@ static int handle_page_alloc(struct nvmap_client *client,
                                nr_page - page_index);
 
 skip_attr_change:
+       if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
+               nvmap_free_pte(client->dev, pte);
        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
@@ -625,6 +656,8 @@ skip_attr_change:
        return 0;
 
 fail:
+       if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
+               nvmap_free_pte(client->dev, pte);
        while (i--) {
                set_pages_array_wb(&pages[i], 1);
                __free_page(pages[i]);
index 5bfbbf6..bb15699 100644 (file)
@@ -3,7 +3,7 @@
  *
  * User-space interface to nvmap
  *
- * Copyright (c) 2011, NVIDIA Corporation.
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -175,6 +175,9 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
        /* user-space handles are aligned to page boundaries, to prevent
         * data leakage. */
        op.align = max_t(size_t, op.align, PAGE_SIZE);
+#if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
+       op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
+#endif
 
        return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
                                     op.align, op.flags);
index 553a7bd..692956b 100644 (file)
@@ -3,7 +3,7 @@
  *
  * structure declarations for nvmem and nvmap user-space ioctls
  *
- * Copyright (c) 2009-2012, NVIDIA Corporation.
+ * Copyright (c) 2009-2012, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -49,6 +49,7 @@
 #define NVMAP_HANDLE_CACHE_FLAG      (0x3ul << 0)
 
 #define NVMAP_HANDLE_SECURE          (0x1ul << 2)
+#define NVMAP_HANDLE_ZEROED_PAGES    (0x1ul << 3)
 
 #if defined(__KERNEL__)