[CRYPTO] blkcipher: Remove alignment restriction on block size
[linux-2.6.git] crypto/blkcipher.c
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

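/* Walk state flags, as used by the code below:
 * PHYS - the caller wants page/offset pairs rather than mapped addresses;
 * SLOW - the current block is processed through an aligned bounce buffer;
 * COPY - data is bounced through walk->page to satisfy the alignmask;
 * DIFF - src and dst differ and are mapped separately.
 */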
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
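/* For example, with 4 KiB pages, start == 0x0ffc and len == 16 gives
 * end_page == (0x0ffc + 15) & ~0xfff == 0x1000, so the spot is bumped to
 * the next page boundary; if start..start+len-1 already fits within one
 * page, end_page <= start and start is returned unchanged.
 */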
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

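/* Slow-path completion: copy the block that was processed in the aligned
 * bounce buffer back out to the destination scatterlist.
 */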
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

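/* Fast-path completion: flush the bounce page to the destination if the
 * data was copied, unmap any mapped pages and advance both walks.
 */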
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

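/* Finish the current walk step.  err is the number of bytes the caller
 * left unprocessed (or a negative error); returns 0 once the walk is
 * complete, an error code, or continues with the next chunk.
 */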
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

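/* Slow path: less than one block is contiguous in the scatterlists, so
 * process a single block through a bounce buffer.  The buffer is sized
 * using the aligned block size so that the aligned dst/src layout below
 * cannot overrun it, with dst and src each placed so that they do not
 * straddle a page.
 */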
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

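/* Copy path: bounce one chunk through the preallocated page so that the
 * cipher sees addresses that satisfy its alignmask.
 */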
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

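/* Fast path: operate on the scatterlist pages directly, mapping the
 * destination separately only when it differs from the source.
 */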
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

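/* Pick the strategy for the next chunk: error out on short input, bounce
 * misaligned data through a copy page, fall back to the slow path when
 * less than one block is contiguous, otherwise take the fast path.
 */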
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

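/* The IV supplied by the caller does not satisfy the cipher's alignmask;
 * copy it to an aligned spot inside walk->buffer.  The layout reserves two
 * aligned scratch blocks ahead of the IV, which the slow path reuses as
 * its dst/src bounce blocks.
 */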
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
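
/*
 * A cipher mode implementation typically drives the walker as in the
 * minimal sketch below (modelled on crypto/cbc.c; crypt_one_block() is a
 * hypothetical per-block helper, not part of this API):
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while ((nbytes = walk.nbytes)) {
 *			u8 *wsrc = walk.src.virt.addr;
 *			u8 *wdst = walk.dst.virt.addr;
 *
 *			do {
 *				crypt_one_block(desc->tfm, wdst, wsrc);
 *				wsrc += bsize;
 *				wdst += bsize;
 *			} while ((nbytes -= bsize) >= bsize);
 *
 *			err = blkcipher_walk_done(desc, &walk, nbytes);
 *		}
 *
 *		return err;
 *	}
 */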

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

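/* Common entry point for both walk flavours: must not be called from
 * hard IRQ context, copies the IV to an aligned buffer when needed and
 * starts both scatterlist walks.
 */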
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

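/* The caller's key buffer does not satisfy the cipher's alignmask; copy
 * the key into an aligned buffer before handing it to setkey, and wipe
 * the copy afterwards.
 */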
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

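/* Validate the key length and dispatch to the algorithm's setkey,
 * bouncing through an aligned copy of the key if necessary.
 */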
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

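/* Adapters that expose this synchronous blkcipher through the
 * asynchronous ablkcipher interface; the operations complete inline.
 */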
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

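/* When the synchronous interface is selected, reserve room for the IV at
 * the aligned end of the context; crypto_init_blkcipher_ops_sync() points
 * crt->iv at that space.
 */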
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}

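/* The synchronous interface keeps its IV in the extra context space
 * reserved by crypto_blkcipher_ctxsize() above.
 */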
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");