nvmap: page pools: hide internal lock from nvmap_handle.c
Colin Cross [Mon, 11 Aug 2014 21:08:40 +0000 (14:08 -0700)]
The internal page pool lock is currently exported so that
nvmap_handle.c can lock it, call a *_locked function, and then
unlock it.  Provide versions of the *_locked functions that take
the lock themselves, remove the lock and unlock helpers, and make
the lock private to the pools again.
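
In call-site terms this collapses the exported three-step sequence
into a single self-locking call.  A minimal before/after sketch of
the pattern, drawn from the _nvmap_handle_free hunk below:

	/* Before: the caller takes the pool lock around the *_locked helper. */
	nvmap_page_pool_lock(pool);
	page_index = __nvmap_page_pool_fill_lots_locked(pool,
					h->pgalloc.pages, nr_page);
	nvmap_page_pool_unlock(pool);

	/* After: the wrapper acquires pool->lock internally. */
	page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
					h->pgalloc.pages, nr_page);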

Change-Id: I5a99753058e43161d50a0c61f3a984655cd7cd35
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/664671
GVS: Gerrit_Virtual_Submit
Reviewed-on: http://git-master/r/736425
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Tested-by: Alex Waterman <alexw@nvidia.com>

drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_pp.c
drivers/video/tegra/nvmap/nvmap_priv.h

diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index d166ffc..3dd2c3c 100644
@@ -86,10 +86,6 @@ void nvmap_altfree(void *ptr, size_t len)
 void _nvmap_handle_free(struct nvmap_handle *h)
 {
        unsigned int i, nr_page, page_index = 0;
-#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
-       !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
-       struct nvmap_page_pool *pool;
-#endif
 
        if (h->nvhost_priv)
                h->nvhost_priv_delete(h->nvhost_priv);
@@ -120,17 +116,9 @@ void _nvmap_handle_free(struct nvmap_handle *h)
        for (i = 0; i < nr_page; i++)
                h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
 
-#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
-       !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
-       if (!zero_memory) {
-               pool = &nvmap_dev->pool;
-
-               nvmap_page_pool_lock(pool);
-               page_index = __nvmap_page_pool_fill_lots_locked(pool,
-                                               h->pgalloc.pages, nr_page);
-               nvmap_page_pool_unlock(pool);
-       }
-#endif
+       if (!zero_memory)
+               page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
+                               h->pgalloc.pages, nr_page);
 
        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);
@@ -169,9 +157,6 @@ static int handle_page_alloc(struct nvmap_client *client,
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
-#ifdef CONFIG_NVMAP_PAGE_POOLS
-       struct nvmap_page_pool *pool = NULL;
-#endif
        gfp_t gfp = GFP_NVMAP;
 
        if (zero_memory)
@@ -194,15 +179,11 @@ static int handle_page_alloc(struct nvmap_client *client,
 
        } else {
 #ifdef CONFIG_NVMAP_PAGE_POOLS
-               pool = &nvmap_dev->pool;
-
                /*
                 * Get as many pages from the pools as possible.
                 */
-               nvmap_page_pool_lock(pool);
-               page_index = __nvmap_page_pool_alloc_lots_locked(pool, pages,
+               page_index = nvmap_page_pool_alloc_lots(&nvmap_dev->pool, pages,
                                                                 nr_page);
-               nvmap_page_pool_unlock(pool);
 #endif
                for (i = page_index; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index 1e869a2..1c2e1f7 100644
@@ -56,6 +56,9 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 #define pp_hit_add(pool, nr)   __pp_dbg_var_add(&(pool)->hits, nr)
 #define pp_miss_add(pool, nr)  __pp_dbg_var_add(&(pool)->misses, nr)
 
+static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
+                                      struct page **pages, u32 nr);
+
 static inline struct page *get_page_list_page(struct nvmap_page_pool *pool)
 {
        struct page *page;
@@ -132,9 +135,9 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
                        return;
                }
 
-               nvmap_page_pool_lock(pool);
+               mutex_lock(&pool->lock);
                i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
-               nvmap_page_pool_unlock(pool);
+               mutex_unlock(&pool->lock);
                pages -= nr;
        } while (pages && i == nr);
 
@@ -249,7 +252,7 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
  *
  * You must lock the page pool before using this.
  */
-int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
+static int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
                                        struct page **pages, u32 nr)
 {
        u32 real_nr;
@@ -280,6 +283,18 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
        return ind;
 }
 
+int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
+                               struct page **pages, u32 nr)
+{
+       int ret;
+
+       mutex_lock(&pool->lock);
+       ret = __nvmap_page_pool_alloc_lots_locked(pool, pages, nr);
+       mutex_unlock(&pool->lock);
+
+       return ret;
+}
+
 /*
  * This adds a page to the pool. Returns true if the passed page is added.
  * That means if the pool is full this operation will fail.
@@ -314,7 +329,7 @@ static bool nvmap_page_pool_fill_locked(struct nvmap_page_pool *pool,
  *
  * You must lock the page pool before using this.
  */
-int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
+static int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr)
 {
        u32 real_nr;
@@ -341,14 +356,26 @@ int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
        return ind;
 }
 
+int nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
+                                      struct page **pages, u32 nr)
+{
+       int ret;
+
+       mutex_lock(&pool->lock);
+       ret = __nvmap_page_pool_fill_lots_locked(pool, pages, nr);
+       mutex_unlock(&pool->lock);
+
+       return ret;
+}
+
 bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
 {
        bool ret = false;
 
        if (pool) {
-               nvmap_page_pool_lock(pool);
+               mutex_lock(&pool->lock);
                ret = nvmap_page_pool_fill_locked(pool, page);
-               nvmap_page_pool_unlock(pool);
+               mutex_unlock(&pool->lock);
        }
 
        return ret;
@@ -367,7 +394,7 @@ static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
        if (!nr_free)
                return nr_free;
 
-       nvmap_page_pool_lock(pool);
+       mutex_lock(&pool->lock);
        while (i) {
                page = nvmap_page_pool_alloc_locked(pool, 1);
                if (!page)
@@ -375,7 +402,7 @@ static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
                __free_page(page);
                i--;
        }
-       nvmap_page_pool_unlock(pool);
+       mutex_unlock(&pool->lock);
 
        return i;
 }
@@ -401,18 +428,18 @@ int nvmap_page_pool_clear(void)
        struct page *page;
        struct nvmap_page_pool *pool = &nvmap_dev->pool;
 
-       nvmap_page_pool_lock(pool);
+       mutex_lock(&pool->lock);
 
        while ((page = nvmap_page_pool_alloc_locked(pool, 1)) != NULL)
                __free_page(page);
 
        /* For some reason, if an error occurred... */
        if (!list_empty(&pool->page_list)) {
-               nvmap_page_pool_unlock(pool);
+               mutex_unlock(&pool->lock);
                return -ENOMEM;
        }
 
-       nvmap_page_pool_unlock(pool);
+       mutex_unlock(&pool->lock);
        nvmap_pp_wake_up_allocator();
 
        return 0;
@@ -428,7 +455,7 @@ static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
        if (!enable_pp || size == pool->max || size < 0)
                return;
 
-       nvmap_page_pool_lock(pool);
+       mutex_lock(&pool->lock);
 
        while (pool->count > size)
                __free_page(nvmap_page_pool_alloc_locked(pool, 0));
@@ -436,7 +463,7 @@ static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
        pool->max = size;
 
        pr_debug("page pool resized to %d from %d pages\n", size, pool->max);
-       nvmap_page_pool_unlock(pool);
+       mutex_unlock(&pool->lock);
 }
 
 static int nvmap_page_pool_shrink(struct shrinker *shrinker,
@@ -633,8 +660,6 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
                        PAGE_SIZE;
        pages_to_fill = pages_to_fill ? : pool->count;
 
-       nvmap_page_pool_lock(pool);
-       atomic_set(&pp_dirty, 1);
        for (i = 0; i < pages_to_fill; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
@@ -653,8 +678,6 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
                highmem_pages, pool->count,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
 done:
-       pp_clean_cache();
-       nvmap_page_pool_unlock(pool);
 #endif
        return 0;
 fail:
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index 3044d0e..f6060e8 100644
@@ -191,23 +191,13 @@ struct nvmap_page_pool {
 #endif
 };
 
-static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
-{
-       mutex_lock(&pool->lock);
-}
-
-static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
-{
-       mutex_unlock(&pool->lock);
-}
-
 int nvmap_page_pool_init(struct nvmap_device *dev);
 int nvmap_page_pool_fini(struct nvmap_device *dev);
 struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
 bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
-int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
+int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
                                        struct page **pages, u32 nr);
-int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
+int nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr);
 int nvmap_page_pool_clear(void);
 int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);