crypto: aesni-intel - Merge with fpu.ko
arch/x86/crypto/aesni-intel_glue.c
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

struct async_aes_ctx {
        struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It is a per-"session" data storage location.
 * It needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16];
        struct crypto_aes_ctx aes_key_expanded;
        u8 nonce[4];
        struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
        int err;
        struct completion completion;
};

struct aesni_hash_subkey_req_data {
        u8 iv[16];
        struct aesni_gcm_set_hash_subkey_result result;
        struct scatterlist sg;
};

#define AESNI_ALIGN     (16)
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
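
/*
 * Illustrative example (hypothetical values): for a Security Association
 * salt of 01 02 03 04 and a per-packet ESP IV of 11 12 13 14 15 16 17 18,
 * the pre-counter block passed in through *iv is laid out as
 *
 *      01 02 03 04  11 12 13 14 15 16 17 18  00 00 00 01
 *
 * __driver_rfc4106_encrypt() and __driver_rfc4106_decrypt() below build
 * exactly this layout from ctx->nonce and req->iv.
 */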

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 * to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 * Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
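
/*
 * Note: aesni_gcm_dec() computes the authentication tag over the
 * ciphertext and AAD and writes it to auth_tag; it does not verify
 * anything itself. The caller is expected to compare that tag against
 * the one received at the end of the ciphertext, as
 * __driver_rfc4106_decrypt() below does.
 */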

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        return
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)
                crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
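
/*
 * Note on the alignment scheme used throughout this file: the algs below
 * declare .cra_ctxsize as sizeof(<ctx struct>) + AESNI_ALIGN - 1 so that
 * rounding the raw context pointer up to a 16 byte boundary, as aes_ctx()
 * and aesni_rfc4106_gcm_ctx_get() do, always stays within the allocation.
 * For example, a raw pointer ending in 0x08 is advanced by 8 bytes, which
 * the 15 spare bytes accommodate.
 */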

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static struct crypto_alg aesni_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(aesni_alg.cra_list),
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
};

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

static struct crypto_alg __aesni_alg = {
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(__aesni_alg.cra_list),
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
};

static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static struct crypto_alg blk_ecb_alg = {
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
};

static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static struct crypto_alg blk_cbc_alg = {
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
};

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
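
/*
 * ctr_crypt() below handles all full blocks with aesni_ctr_enc() and uses
 * ctr_crypt_final() for a trailing partial block: the keystream for the
 * final counter value is produced with a single aesni_enc() and only
 * nbytes of it are XORed into the output.
 */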

static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}

static struct crypto_alg blk_ctr_alg = {
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
};
#endif

static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int key_len)
{
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
        int err;

        crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
                                    & CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(child, key, key_len);
        crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
                                    & CRYPTO_TFM_RES_MASK);
        return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_ablkcipher_encrypt(cryptd_req);
        } else {
                struct blkcipher_desc desc;
                desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
                desc.info = req->info;
                desc.flags = 0;
                return crypto_blkcipher_crt(desc.tfm)->encrypt(
                        &desc, req->dst, req->src, req->nbytes);
        }
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_ablkcipher_decrypt(cryptd_req);
        } else {
                struct blkcipher_desc desc;
                desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
                desc.info = req->info;
                desc.flags = 0;
                return crypto_blkcipher_crt(desc.tfm)->decrypt(
                        &desc, req->dst, req->src, req->nbytes);
        }
}
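
/*
 * Note the shared pattern in ablk_encrypt()/ablk_decrypt() above: when
 * the FPU is usable the request is handled synchronously by the
 * underlying "__driver-*" blkcipher; otherwise it is queued to cryptd,
 * whose worker later runs the same blkcipher in process context, where
 * the FPU may be used.
 */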

static void ablk_exit(struct crypto_tfm *tfm)
{
        struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
                             struct cryptd_ablkcipher *cryptd_tfm)
{
        struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
                crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ecb_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_cbc_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_ctr_alg = {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                        .geniv          = "chainiv",
                },
        },
};

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher(
                "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_rfc3686_ctr_alg = {
        .cra_name               = "rfc3686(ctr(aes))",
        .cra_driver_name        = "rfc3686-ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
        .cra_init               = ablk_rfc3686_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
                        .ivsize      = CTR_RFC3686_IV_SIZE,
                        .setkey      = ablk_set_key,
                        .encrypt     = ablk_encrypt,
                        .decrypt     = ablk_decrypt,
                        .geniv       = "seqiv",
                },
        },
};
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
                                             0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_lrw_alg = {
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
        .cra_init               = ablk_lrw_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
                                             0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_pcbc_alg = {
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
        struct cryptd_ablkcipher *cryptd_tfm;

        cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
                                             0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
        return 0;
}

static struct crypto_alg ablk_xts_alg = {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
        .cra_init               = ablk_xts_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx =
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero.
         * We want to cipher all zeros to create the hash sub key.
         */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
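
        /*
         * ctr(aes) with an all-zero counter block acting on one all-zero
         * block returns AES(K, 0^128) XOR 0^128, i.e. precisely the GHASH
         * hash subkey H = E(K, 0^128) that GCM requires.
         */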

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *child_ctx =
                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /* key is not aligned: use an auxiliary aligned buffer */
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_mem, key, key_len);
                key = new_key_mem;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
        memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
        kfree(new_key_mem);
        return ret;
}
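
/*
 * Illustrative note: the key blob handed to rfc4106_set_key() is the AES
 * key with the 4 byte salt appended, so a 20 byte blob carries an AES-128
 * key in bytes 0..15 and the nonce/salt in bytes 16..19.
 */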

/* This is the Integrity Check Value (aka the authentication tag) length.
 * It can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(parent)->authsize = authsize;
        crypto_aead_crt(cryptd_child)->authsize = authsize;
        return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int rfc4106_decrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static struct crypto_alg rfc4106_alg = {
        .cra_name = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
        .cra_alignmask = 0,
        .cra_type = &crypto_nivaead_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
        .cra_init = rfc4106_init,
        .cra_exit = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey = rfc4106_set_key,
                        .setauthsize = rfc4106_set_authsize,
                        .encrypt = rfc4106_encrypt,
                        .decrypt = rfc4106_decrypt,
                        .geniv = "seqiv",
                        .ivsize = 8,
                        .maxauthsize = 16,
                },
        },
};

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, the AAD length needs to be
         * 8 or 12 bytes.
         */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;
        /* Build the IV: salt | ESP IV | initial counter */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk, 0);
                assoc = scatterwalk_map(&assoc_sg_walk, 0);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk, 0);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst, 0);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src, 0);
                scatterwalk_unmap(assoc, 0);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                        req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_and_authTag[32+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
        u8 *authTag = iv + 16;
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        if (unlikely((req->cryptlen < auth_tag_len) ||
                (req->assoclen != 8 && req->assoclen != 12)))
                return -EINVAL;
        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, the AAD length needs to be
         * 8 or 12 bytes (checked above).
         */

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* Build the IV: salt | ESP IV | initial counter */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk, 0);
                assoc = scatterwalk_map(&assoc_sg_walk, 0);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk, 0);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
                /* req->cryptlen already includes the auth tag, so the
                 * associated data must start right after the ciphertext;
                 * adding auth_tag_len here would overrun the buffer
                 * allocated above.
                 */
                assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);

        /* Compare generated tag with passed in tag. */
        retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst, 0);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src, 0);
                scatterwalk_unmap(assoc, 0);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
                kfree(src);
        }
        return retval;
}

static struct crypto_alg __rfc4106_alg = {
        .cra_name               = "__gcm-aes-aesni",
        .cra_driver_name        = "__driver-gcm-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize          = 1,
        .cra_ctxsize    = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_aead_type,
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
        .cra_u = {
                .aead = {
                        .encrypt        = __driver_rfc4106_encrypt,
                        .decrypt        = __driver_rfc4106_decrypt,
                },
        },
};
#endif

static int __init aesni_init(void)
{
        int err;

        if (!cpu_has_aes) {
                printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
                return -ENODEV;
        }

        if ((err = crypto_fpu_init()))
                goto fpu_err;
        if ((err = crypto_register_alg(&aesni_alg)))
                goto aes_err;
        if ((err = crypto_register_alg(&__aesni_alg)))
                goto __aes_err;
        if ((err = crypto_register_alg(&blk_ecb_alg)))
                goto blk_ecb_err;
        if ((err = crypto_register_alg(&blk_cbc_alg)))
                goto blk_cbc_err;
        if ((err = crypto_register_alg(&ablk_ecb_alg)))
                goto ablk_ecb_err;
        if ((err = crypto_register_alg(&ablk_cbc_alg)))
                goto ablk_cbc_err;
#ifdef CONFIG_X86_64
        if ((err = crypto_register_alg(&blk_ctr_alg)))
                goto blk_ctr_err;
        if ((err = crypto_register_alg(&ablk_ctr_alg)))
                goto ablk_ctr_err;
        if ((err = crypto_register_alg(&__rfc4106_alg)))
                goto __aead_gcm_err;
        if ((err = crypto_register_alg(&rfc4106_alg)))
                goto aead_gcm_err;
#ifdef HAS_CTR
        if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
                goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
        if ((err = crypto_register_alg(&ablk_lrw_alg)))
                goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
        if ((err = crypto_register_alg(&ablk_pcbc_alg)))
                goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
        if ((err = crypto_register_alg(&ablk_xts_alg)))
                goto ablk_xts_err;
#endif
        return err;

#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
        crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
        crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
        crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
        crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
        crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
        crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
        crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
        crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
        crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
        crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
        crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
        crypto_unregister_alg(&__aesni_alg);
__aes_err:
        crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
        return err;
}

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
        crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
        crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
        crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
        crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
        crypto_unregister_alg(&rfc4106_alg);
        crypto_unregister_alg(&__rfc4106_alg);
        crypto_unregister_alg(&ablk_ctr_alg);
        crypto_unregister_alg(&blk_ctr_alg);
#endif
        crypto_unregister_alg(&ablk_cbc_alg);
        crypto_unregister_alg(&ablk_ecb_alg);
        crypto_unregister_alg(&blk_cbc_alg);
        crypto_unregister_alg(&blk_ecb_alg);
        crypto_unregister_alg(&__aesni_alg);
        crypto_unregister_alg(&aesni_alg);

        crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");