bcmdhd: do not turn off mmc at probe
[linux-2.6.git] / lib / scatterlist.c
index 876ba6d..4ceb05d 100644 (file)
@@ -7,8 +7,10 @@
  * Version 2. See the file COPYING for more details.
  */
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
+#include <linux/kmemleak.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -114,17 +116,29 @@ EXPORT_SYMBOL(sg_init_one);
  */
 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
-       if (nents == SG_MAX_SINGLE_ALLOC)
-               return (struct scatterlist *) __get_free_page(gfp_mask);
-       else
+       if (nents == SG_MAX_SINGLE_ALLOC) {
+               /*
+                * Kmemleak doesn't track page allocations as they are not
+                * commonly used (in a raw form) for kernel data structures.
+                * As we chain together a list of pages and then a normal
 +                * kmalloc (tracked by kmemleak), in order for that last
+                * allocation not to become decoupled (and thus a
+                * false-positive) we need to inform kmemleak of all the
+                * intermediate allocations.
+                */
+               void *ptr = (void *) __get_free_page(gfp_mask);
+               kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
+               return ptr;
+       } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 }
 
 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
-       if (nents == SG_MAX_SINGLE_ALLOC)
+       if (nents == SG_MAX_SINGLE_ALLOC) {
+               kmemleak_free(sg);
                free_page((unsigned long) sg);
-       else
+       } else
                kfree(sg);
 }
 
@@ -234,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                left -= sg_size;
 
                sg = alloc_fn(alloc_size, gfp_mask);
-               if (unlikely(!sg))
-                       return -ENOMEM;
+               if (unlikely(!sg)) {
+                       /*
+                        * Adjust entry count to reflect that the last
+                        * entry of the previous table won't be used for
+                        * linkage.  Without this, sg_kfree() may get
+                        * confused.
+                        */
+                       if (prv)
+                               table->nents = ++table->orig_nents;
+
+                       return -ENOMEM;
+               }
 
                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;
@@ -314,6 +338,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
        miter->__sg = sgl;
        miter->__nents = nents;
        miter->__offset = 0;
+       WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
@@ -347,9 +372,12 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
        sg_miter_stop(miter);
 
        /* get to the next sg if necessary.  __offset is adjusted by stop */
-       if (miter->__offset == miter->__sg->length && --miter->__nents) {
-               miter->__sg = sg_next(miter->__sg);
-               miter->__offset = 0;
+       while (miter->__offset == miter->__sg->length) {
+               if (--miter->__nents) {
+                       miter->__sg = sg_next(miter->__sg);
+                       miter->__offset = 0;
+               } else
+                       return false;
        }
 
        /* map the next page */
@@ -391,11 +419,14 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
        if (miter->addr) {
                miter->__offset += miter->consumed;
 
+               if (miter->__flags & SG_MITER_TO_SG)
+                       flush_kernel_dcache_page(miter->page);
+
                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON(!irqs_disabled());
                        kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
                } else
-                       kunmap(miter->addr);
+                       kunmap(miter->page);
 
                miter->page = NULL;
                miter->addr = NULL;
@@ -422,8 +453,17 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 {
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+
+       if (to_buffer)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
 
-       sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+       sg_miter_start(&miter, sgl, nents, sg_flags);
+
+       local_irq_save(flags);
 
        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;
@@ -432,16 +472,15 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
-               else {
+               else
                        memcpy(miter.addr, buf + offset, len);
-                       flush_kernel_dcache_page(miter.page);
-               }
 
                offset += len;
        }
 
        sg_miter_stop(&miter);
 
+       local_irq_restore(flags);
        return offset;
 }