ARM: dma-mapping: atomic_pool with struct page **pages
Hiroshi Doyu [Fri, 24 Aug 2012 06:36:41 +0000 (09:36 +0300)]
struct page **pages is necessary to align with the non-atomic path in
__iommu_get_pages(). atomic_pool now keeps an initialized **pages array
instead of just a single *page pointer.

Change-Id: I3d8c2501fb7c74dc28226510f48f0e2c126e0076
Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>

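For readers who want to see the shape of the change outside the kernel tree, here is a minimal userspace sketch, not kernel code: struct page is reduced to a placeholder and the names pool_model/pool_model_init are invented for illustration. It models what the patch does in atomic_pool_init(): the pool still comes from one contiguous allocation, but every page in that run now also gets its own slot in a struct page **pages array, the same per-page representation the non-atomic __iommu_get_pages() path works with.

/* pool_model.c: hedged illustration only, not the kernel implementation. */
#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long pfn; };     /* stand-in for the kernel's struct page */

struct pool_model {                     /* models struct dma_pool after this patch */
        unsigned long nr_pages;
        struct page **pages;            /* one pointer per page, like pool->pages */
};

/* Model of the init path: one contiguous run of pages is allocated, then
 * every page in that run gets its own slot in pages[]. */
static int pool_model_init(struct pool_model *pool, struct page *first,
                           unsigned long nr_pages)
{
        unsigned long i;

        pool->pages = calloc(nr_pages, sizeof(struct page *));
        if (!pool->pages)
                return -1;

        for (i = 0; i < nr_pages; i++)
                pool->pages[i] = first + i;     /* mirrors pages[i] = page + i */

        pool->nr_pages = nr_pages;
        return 0;
}

int main(void)
{
        struct page run[4] = { {0}, {1}, {2}, {3} };    /* pretend contiguous allocation */
        struct pool_model pool;

        if (pool_model_init(&pool, run, 4))
                return 1;
        printf("page 2 of the pool has pfn %lu\n", pool.pages[2]->pfn);
        free(pool.pages);
        return 0;
}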
arch/arm/mm/dma-mapping.c

index 70a6275..c4696da 100644
@@ -296,7 +296,7 @@ struct dma_pool {
        unsigned long *bitmap;
        unsigned long nr_pages;
        void *vaddr;
-       struct page *page;
+       struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
@@ -335,6 +335,7 @@ static int __init atomic_pool_init(void)
        unsigned long nr_pages = pool->size >> PAGE_SHIFT;
        unsigned long *bitmap;
        struct page *page;
+       struct page **pages;
        void *ptr;
        int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -342,21 +343,31 @@ static int __init atomic_pool_init(void)
        if (!bitmap)
                goto no_bitmap;
 
+       pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+       if (!pages)
+               goto no_pages;
+
        if (IS_ENABLED(CONFIG_CMA))
                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
        else
                ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
                                           &page, NULL);
        if (ptr) {
+               int i;
+
+               for (i = 0; i < nr_pages; i++)
+                       pages[i] = page + i;
+
                spin_lock_init(&pool->lock);
                pool->vaddr = ptr;
-               pool->page = page;
+               pool->pages = pages;
                pool->bitmap = bitmap;
                pool->nr_pages = nr_pages;
                pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
                       (unsigned)pool->size / 1024);
                return 0;
        }
+no_pages:
        kfree(bitmap);
 no_bitmap:
        pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -481,7 +492,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
        if (pageno < pool->nr_pages) {
                bitmap_set(pool->bitmap, pageno, count);
                ptr = pool->vaddr + PAGE_SIZE * pageno;
-               *ret_page = pool->page + pageno;
+               *ret_page = pool->pages[pageno];
        } else {
                pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
                            "Please increase it with coherent_pool= kernel parameter!\n",
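A similar hedged sketch for the lookup side, again userspace-only: alloc_from_pool_model, MODEL_PAGE_SIZE and the byte-per-page bitmap below are invented stand-ins, whereas the kernel uses bitmap_find_next_zero_area() and bitmap_set(). The point it illustrates is that once the pool carries a pages[] array, handing back the page behind an allocation is a plain index, pool->pages[pageno], rather than pointer arithmetic on a single base page, which is what lets the atomic path line up with the pages-array view used by __iommu_get_pages().

/* alloc_model.c: illustration only, not the kernel's __alloc_from_pool(). */
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096UL

struct page { unsigned long pfn; };

struct pool_model {
        unsigned char *bitmap;          /* one byte per page; the kernel packs bits */
        unsigned long nr_pages;
        void *vaddr;                    /* mapping of the whole pool */
        struct page **pages;            /* one entry per page, as after this patch */
};

/* Find `count` free pages, mark them used, and return both the mapped address
 * and the first backing page, taken from pages[] rather than base + pageno. */
static void *alloc_from_pool_model(struct pool_model *pool, unsigned long count,
                                   struct page **ret_page)
{
        unsigned long pageno, i;

        for (pageno = 0; pageno + count <= pool->nr_pages; pageno++) {
                for (i = 0; i < count && !pool->bitmap[pageno + i]; i++)
                        ;
                if (i == count) {
                        memset(&pool->bitmap[pageno], 1, count);
                        *ret_page = pool->pages[pageno];        /* was: pool->page + pageno */
                        return (char *)pool->vaddr + MODEL_PAGE_SIZE * pageno;
                }
        }
        return NULL;
}

int main(void)
{
        struct page run[4] = { {10}, {11}, {12}, {13} };
        struct page *slots[4] = { &run[0], &run[1], &run[2], &run[3] };
        unsigned char map[4] = { 0 };
        static char backing[4 * MODEL_PAGE_SIZE];
        struct pool_model pool = { map, 4, backing, slots };
        struct page *got = NULL;
        void *ptr = alloc_from_pool_model(&pool, 2, &got);

        if (ptr && got)
                printf("allocated 2 pages starting at pfn %lu\n", got->pfn);
        return 0;
}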