Revert "video: tegra: nvmap: clean cache during page allocations into page pool"
Krishna Reddy [Tue, 16 Dec 2014 23:51:03 +0000 (15:51 -0800)]
This reverts commit b1d8c6c9415df111e4af1425a3d84b25c00a9c06.

Change-Id: Ide7e78780722bdd30426089f38155c7cabf28934
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/664669
GVS: Gerrit_Virtual_Submit
Reviewed-on: http://git-master/r/736423
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Tested-by: Alex Waterman <alexw@nvidia.com>

drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_pp.c

diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 4281daf..d166ffc 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -220,11 +220,10 @@ static int handle_page_alloc(struct nvmap_client *client,
         * FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
         * to use the flush cache version.
         */
-       if (page_index < nr_page)
 #ifdef ARM64
-               nvmap_clean_cache(&pages[page_index], nr_page - page_index);
+       nvmap_clean_cache(pages, nr_page);
 #else
-               nvmap_flush_cache(&pages[page_index], nr_page - page_index);
+       nvmap_flush_cache(pages, nr_page);
 #endif
 
        h->size = size;
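
For context, the hunk above reverts to maintaining the cache over the
whole page array; the removed lines had skipped the first page_index
pages because they arrived pre-cleaned from the page pool. An annotated
sketch of the restored step (the wrapper function is illustrative, not
part of the patch):

	/* Sketch of the post-revert maintenance step in
	 * handle_page_alloc(): every page is processed, whether it came
	 * from the page pool or straight from the allocator. */
	static void example_cache_maint(struct page **pages, int nr_page)
	{
	#ifdef ARM64
		/* Clean: write dirty lines back, keep them cached. */
		nvmap_clean_cache(pages, nr_page);
	#else
		/* ARMv7 has no __clean_dcache_page(), so fall back to a
		 * full writeback + invalidate. */
		nvmap_flush_cache(pages, nr_page);
	#endif
	}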
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index 4748155..b2878f0 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -32,7 +32,7 @@
 #include "nvmap_priv.h"
 
 #define NVMAP_TEST_PAGE_POOL_SHRINKER     1
-#define PENDING_PAGES_SIZE                (SZ_1M / PAGE_SIZE)
+#define PENDING_PAGES_SIZE                128
 #define MIN_AVAILABLE_MB                  128
 
 static bool enable_pp = 1;
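
For scale: assuming 4 KiB pages, the reverted expression SZ_1M /
PAGE_SIZE works out to 256 pending slots (a 1 MiB batch), so restoring
the constant 128 halves the batch to 512 KiB.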
@@ -41,7 +41,6 @@ static int pool_size;
 static struct task_struct *background_allocator;
 static struct page *pending_pages[PENDING_PAGES_SIZE];
 static atomic_t bg_pages_to_fill;
-static atomic_t pp_dirty;
 
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
 static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
@@ -57,21 +56,6 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 #define pp_hit_add(pool, nr)   __pp_dbg_var_add(&(pool)->hits, nr)
 #define pp_miss_add(pool, nr)  __pp_dbg_var_add(&(pool)->misses, nr)
 
-static void pp_clean_cache(void)
-{
-       if (atomic_read(&pp_dirty)) {
-               /*
-                * Make sure any data in the caches is cleaned out before
-                * passing these pages to userspace. Otherwise, it can lead to
-                * corruption in pages that get mapped as something other than
-                * WB in userspace, and to leaked kernel data.
-                */
-               inner_clean_cache_all();
-               outer_clean_all();
-               atomic_set(&pp_dirty, 0);
-       }
-}
-
 /*
  * Allocate n pages one by one. Not the most efficient allocation scheme ever;
  * however, it will make it easier later on to handle single or small number of
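
The hunk above removes the deferred-clean machinery. Condensed from the
removed lines, for illustration only (pp_mark_dirty() is a hypothetical
stand-in for the inline atomic_set() in the fill path):

	/* Fill side: pages fresh from the allocator may leave dirty
	 * cache lines behind, so flag the pool instead of cleaning
	 * each page up front. */
	static atomic_t pp_dirty;

	static void pp_mark_dirty(void)
	{
		atomic_set(&pp_dirty, 1);
	}

	/* Alloc side: one batched writeback of the whole cache
	 * hierarchy before pool pages can reach userspace. */
	static void pp_clean_cache(void)
	{
		if (atomic_read(&pp_dirty)) {
			inner_clean_cache_all();	/* inner data caches */
			outer_clean_all();		/* outer cache, if any */
			atomic_set(&pp_dirty, 0);
		}
	}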
@@ -134,7 +118,6 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
                }
 
                nvmap_page_pool_lock(pool);
-               atomic_set(&pp_dirty, 1);
                i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
                nvmap_page_pool_unlock(pool);
                pages -= nr;
@@ -142,10 +125,6 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
 
        for (; i < nr; i++)
                __free_page(pending_pages[i]);
-       /* clean cache in the background so that allocations immediately
-        * after fill don't suffer the cache clean overhead.
-        */
-       pp_clean_cache();
 }
 
 /*
@@ -232,7 +211,6 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
                BUG_ON(pool->count == 0);
 
-       pp_clean_cache();
        page = pool->page_array[pool->alloc];
        pool->page_array[pool->alloc] = NULL;
        nvmap_pp_alloc_inc(pool);
@@ -266,8 +244,6 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
        if (!enable_pp || !pool->page_array)
                return 0;
 
-       pp_clean_cache();
-
        real_nr = min_t(u32, nr, pool->count);
 
        while (real_nr--) {
@@ -359,6 +335,19 @@ int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
        return ind;
 }
 
+bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
+{
+       bool ret = false;
+
+       if (pool) {
+               nvmap_page_pool_lock(pool);
+               ret = nvmap_page_pool_fill_locked(pool, page);
+               nvmap_page_pool_unlock(pool);
+       }
+
+       return ret;
+}
+
 static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
 {
        return pool->count;
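
The reinstated nvmap_page_pool_fill() wraps the locked single-page fill
for external callers. A hedged usage sketch (the caller and its name
are hypothetical, not part of this patch):

	/* Recycle pages into the pool one at a time, freeing any page
	 * the pool declines (e.g. pool full or disabled). */
	static void example_recycle_pages(struct nvmap_page_pool *pool,
					  struct page **pages, int nr)
	{
		int i;

		for (i = 0; i < nr; i++)
			if (!nvmap_page_pool_fill(pool, pages[i]))
				__free_page(pages[i]);
	}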