Revert "USB: Correct Makefile to make isp1760 buildable"
[linux-2.6.git] / mm / dmapool.c
index e2ea454..b1f0885 100644 (file)
@@ -17,7 +17,9 @@
  * The current design of this allocator is fairly simple.  The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
  * allocated pages.  Each page in the page_list is split into blocks of at
- * least 'size' bytes.
+ * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
+ * list of free blocks within the page.  Used blocks aren't tracked, but we
+ * keep a count of how many are currently allocated from each page.
  */
 
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+#define DMAPOOL_DEBUG 1
+#endif
+
 struct dma_pool {              /* the pool */
        struct list_head page_list;
        spinlock_t lock;
-       size_t blocks_per_page;
        size_t size;
        struct device *dev;
        size_t allocation;
+       size_t boundary;
        char name[32];
        wait_queue_head_t waitq;
        struct list_head pools;
@@ -51,8 +57,8 @@ struct dma_page {             /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
-       unsigned in_use;
-       unsigned long bitmap[0];
+       unsigned int in_use;
+       unsigned int offset;
 };
 
 #define        POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)
@@ -87,8 +93,8 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 
                /* per-pool info, no real statistics yet */
                temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
-                                pool->name,
-                                blocks, pages * pool->blocks_per_page,
+                                pool->name, blocks,
+                                pages * (pool->allocation / pool->size),
                                 pool->size, pages);
                size -= temp;
                next += temp;
@@ -106,7 +112,7 @@ static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
  * @dev: device that will be doing the DMA
  * @size: size of the blocks in this pool.
  * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
+ * @boundary: returned blocks won't cross this power of two boundary
  * Context: !in_interrupt()
  *
  * Returns a dma allocation pool with the requested characteristics, or
@@ -116,15 +122,16 @@ static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
  * cache flushing primitives.  The actual size of blocks allocated may be
  * larger than requested because of alignment.
  *
- * If allocation is nonzero, objects returned from dma_pool_alloc() won't
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
  * cross that size boundary.  This is useful for devices which have
  * addressing restrictions on individual DMA transfers, such as not crossing
  * boundaries of 4KBytes.
  */
 struct dma_pool *dma_pool_create(const char *name, struct device *dev,
-                                size_t size, size_t align, size_t allocation)
+                                size_t size, size_t align, size_t boundary)
 {
        struct dma_pool *retval;
+       size_t allocation;
 
        if (align == 0) {
                align = 1;
@@ -132,35 +139,36 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                return NULL;
        }
 
-       if (size == 0)
+       if (size == 0) {
                return NULL;
+       } else if (size < 4) {
+               size = 4;
+       }
 
        if ((size % align) != 0)
                size = ALIGN(size, align);
 
-       if (allocation == 0) {
-               if (PAGE_SIZE < size)
-                       allocation = size;
-               else
-                       allocation = PAGE_SIZE;
-               /* FIXME: round up for less fragmentation */
-       } else if (allocation < size)
+       allocation = max_t(size_t, size, PAGE_SIZE);
+
+       if (!boundary) {
+               boundary = allocation;
+       } else if ((boundary < size) || (boundary & (boundary - 1))) {
                return NULL;
+       }
 
-       if (!
-           (retval =
-            kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
+       retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
+       if (!retval)
                return retval;
 
-       strlcpy(retval->name, name, sizeof retval->name);
+       strlcpy(retval->name, name, sizeof(retval->name));
 
        retval->dev = dev;
 
        INIT_LIST_HEAD(&retval->page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
+       retval->boundary = boundary;
        retval->allocation = allocation;
-       retval->blocks_per_page = allocation / size;
        init_waitqueue_head(&retval->waitq);
 
        if (dev) {
@@ -186,28 +194,39 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 }
 EXPORT_SYMBOL(dma_pool_create);
 
+static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
+{
+       unsigned int offset = 0;
+       unsigned int next_boundary = pool->boundary;
+
+       do {
+               unsigned int next = offset + pool->size;
+               if (unlikely((next + pool->size) >= next_boundary)) {
+                       next = next_boundary;
+                       next_boundary += pool->boundary;
+               }
+               *(int *)(page->vaddr + offset) = next;
+               offset = next;
+       } while (offset < pool->allocation);
+}
+
/*
 * Allocate one 'allocation'-sized DMA page plus its cacheable header,
 * thread the page's free-block list, and add it to the pool.  Returns
 * NULL when either the header or the coherent memory cannot be obtained.
 */
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	/* The header is fixed-size now: no bitmap trails the struct. */
	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	pool_initialise_page(pool, page);
	list_add(&page->page_list, &pool->page_list);
	page->in_use = 0;
	page->offset = 0;	/* first free block is at the start of the page */
	return page;
}
 
-static inline int is_page_busy(int blocks, unsigned long *bitmap)
+static inline int is_page_busy(struct dma_page *page)
 {
-       while (blocks > 0) {
-               if (*bitmap++ != ~0UL)
-                       return 1;
-               blocks -= BITS_PER_LONG;
-       }
-       return 0;
+       return page->in_use != 0;
 }
 
 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
        dma_addr_t dma = page->dma;
 
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
        dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
@@ -257,7 +271,7 @@ void dma_pool_destroy(struct dma_pool *pool)
                struct dma_page *page;
                page = list_entry(pool->page_list.next,
                                  struct dma_page, page_list);
-               if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
+               if (is_page_busy(page)) {
                        if (pool->dev)
                                dev_err(pool->dev,
                                        "dma_pool_destroy %s, %p busy\n",
@@ -292,27 +306,14 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 {
        unsigned long flags;
        struct dma_page *page;
-       int map, block;
        size_t offset;
        void *retval;
 
        spin_lock_irqsave(&pool->lock, flags);
  restart:
        list_for_each_entry(page, &pool->page_list, page_list) {
-               int i;
-               /* only cachable accesses here ... */
-               for (map = 0, i = 0;
-                    i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
-                       if (page->bitmap[map] == 0)
-                               continue;
-                       block = ffz(~page->bitmap[map]);
-                       if ((i + block) < pool->blocks_per_page) {
-                               clear_bit(block, &page->bitmap[map]);
-                               offset = (BITS_PER_LONG * map) + block;
-                               offset *= pool->size;
-                               goto ready;
-                       }
-               }
+               if (page->offset < pool->allocation)
+                       goto ready;
        }
        page = pool_alloc_page(pool, GFP_ATOMIC);
        if (!page) {
@@ -333,13 +334,13 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                goto done;
        }
 
-       clear_bit(0, &page->bitmap[0]);
-       offset = 0;
  ready:
        page->in_use++;
+       offset = page->offset;
+       page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
  done:
@@ -379,7 +380,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
        struct dma_page *page;
        unsigned long flags;
-       int map, block;
+       unsigned int offset;
 
        page = pool_find_page(pool, dma);
        if (!page) {
@@ -393,13 +394,9 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
                return;
        }
 
-       block = dma - page->dma;
-       block /= pool->size;
-       map = block / BITS_PER_LONG;
-       block %= BITS_PER_LONG;
-
-#ifdef CONFIG_DEBUG_SLAB
-       if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
+       offset = vaddr - page->vaddr;
+#ifdef DMAPOOL_DEBUG
+       if ((dma - page->dma) != offset) {
                if (pool->dev)
                        dev_err(pool->dev,
                                "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
@@ -410,28 +407,36 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
                               pool->name, vaddr, (unsigned long long)dma);
                return;
        }
-       if (page->bitmap[map] & (1UL << block)) {
-               if (pool->dev)
-                       dev_err(pool->dev,
-                               "dma_pool_free %s, dma %Lx already free\n",
-                               pool->name, (unsigned long long)dma);
-               else
-                       printk(KERN_ERR
-                              "dma_pool_free %s, dma %Lx already free\n",
-                              pool->name, (unsigned long long)dma);
-               return;
+       {
+               unsigned int chain = page->offset;
+               while (chain < pool->allocation) {
+                       if (chain != offset) {
+                               chain = *(int *)(page->vaddr + chain);
+                               continue;
+                       }
+                       if (pool->dev)
+                               dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
+                                       "already free\n", pool->name,
+                                       (unsigned long long)dma);
+                       else
+                               printk(KERN_ERR "dma_pool_free %s, dma %Lx "
+                                       "already free\n", pool->name,
+                                       (unsigned long long)dma);
+                       return;
+               }
        }
        memset(vaddr, POOL_POISON_FREED, pool->size);
 #endif
 
        spin_lock_irqsave(&pool->lock, flags);
        page->in_use--;
-       set_bit(block, &page->bitmap[map]);
+       *(int *)vaddr = page->offset;
+       page->offset = offset;
        if (waitqueue_active(&pool->waitq))
                wake_up_locked(&pool->waitq);
        /*
         * Resist a temptation to do
-        *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
+        *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
        spin_unlock_irqrestore(&pool->lock, flags);