diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 07b6f17..3744d2a 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
+#include <linux/sched.h>
 
-static inline enum km_type crypto_kmap_type(int out)
+static inline void crypto_yield(u32 flags)
 {
-       enum km_type type;
-
-       if (in_softirq())
-               type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
-       else
-               type = out * (KM_USER1 - KM_USER0) + KM_USER0;
-
-       return type;
+       if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+               cond_resched();
 }
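
(For context only, not part of the patch: a caller of the relocated crypto_yield() normally hands it the request flags, so a long-running transform only reschedules when the submitter set CRYPTO_TFM_REQ_MAY_SLEEP. A minimal sketch, assuming a hypothetical process_block() helper and an ablkcipher request as input:)

	#include <linux/crypto.h>
	#include <crypto/scatterwalk.h>

	static int example_walk_blocks(struct ablkcipher_request *req)
	{
		unsigned int done;

		for (done = 0; done < req->nbytes; done += 16) {
			process_block(req, done);	/* hypothetical helper */
			crypto_yield(req->base.flags);	/* cond_resched() only if
							   CRYPTO_TFM_REQ_MAY_SLEEP */
		}
		return 0;
	}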
 
-static inline void *crypto_kmap(struct page *page, int out)
+static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
+                                       struct scatterlist *sg2)
 {
-       return kmap_atomic(page, crypto_kmap_type(out));
+       sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+       sg1[num - 1].page_link &= ~0x02;
 }
 
-static inline void crypto_kunmap(void *vaddr, int out)
+static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 {
-       kunmap_atomic(vaddr, crypto_kmap_type(out));
+       if (sg_is_last(sg))
+               return NULL;
+
+       return (++sg)->length ? sg : (void *)sg_page(sg);
 }
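
(Illustration only, not in the patch; example_sg_total() is an assumed name. Walking a possibly chained scatterlist with the helper above looks like this:)

	static unsigned int example_sg_total(struct scatterlist *sg)
	{
		unsigned int total = 0;

		while (sg) {
			total += sg->length;
			sg = scatterwalk_sg_next(sg);	/* NULL past the last entry */
		}
		return total;
	}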
 
-static inline void crypto_yield(u32 flags)
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+                                           struct scatterlist *sg,
+                                           int chain, int num)
 {
-       if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
-               cond_resched();
+       if (chain) {
+               head->length += sg->length;
+               sg = scatterwalk_sg_next(sg);
+       }
+
+       if (sg)
+               scatterwalk_sg_chain(head, num, sg);
+       else
+               sg_mark_end(head);
 }
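
(Usage sketch, not part of the patch; the function and parameter names are assumptions. A caller builds a short local list, then splices the user-supplied list onto it, much as the AEAD/IV-generator glue code prepends an IV block to the payload:)

	static void example_prepend_iv(struct scatterlist head[2], void *iv,
				       unsigned int ivlen, struct scatterlist *payload)
	{
		sg_init_table(head, 2);
		sg_set_buf(head, iv, ivlen);
		/* chain == 0: link 'payload' in via the spare second entry */
		scatterwalk_crypto_chain(head, payload, 0, 2);
	}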
 
 static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
@@ -90,15 +99,15 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
        return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 }
 
-static inline void scatterwalk_unmap(void *vaddr, int out)
+static inline void scatterwalk_unmap(void *vaddr)
 {
-       crypto_kunmap(vaddr, out);
+       kunmap_atomic(vaddr);
 }
 
 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                            size_t nbytes, int out);
-void *scatterwalk_map(struct scatter_walk *walk, int out);
+void *scatterwalk_map(struct scatter_walk *walk);
 void scatterwalk_done(struct scatter_walk *walk, int out, int more);
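
(A minimal usage sketch of the walk interface declared above, assuming the caller wants the first len bytes of a scatterlist in a linear buffer; example_read_sg() is an assumed name, and scatterwalk_map_and_copy() below offers a similar shortcut with a start offset:)

	static void example_read_sg(void *buf, struct scatterlist *sg,
				    unsigned int len)
	{
		struct scatter_walk walk;

		scatterwalk_start(&walk, sg);
		scatterwalk_copychunks(buf, &walk, len, 0);	/* out == 0: read from sg */
		scatterwalk_done(&walk, 0, 0);			/* out == 0, no more data */
	}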
 
 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,