crypto: cryptd - convert to per-CPU workqueue-backed queues and add hash and AEAD support
[linux-3.10.git] crypto/cryptd.c
index b150de5..7bdd61b 100644
@@ -3,6 +3,13 @@
  *
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  */
 
 #include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-       spinlock_t lock;
-       struct mutex mutex;
+struct cryptd_cpu_queue {
        struct crypto_queue queue;
-       struct task_struct *task;
+       struct work_struct work;
+};
+
+struct cryptd_queue {
+       struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
-       struct cryptd_state *state;
+       struct cryptd_queue *queue;
+};
+
+struct hashd_instance_ctx {
+       struct crypto_shash_spawn spawn;
+       struct cryptd_queue *queue;
+};
+
+struct aead_instance_ctx {
+       struct crypto_aead_spawn aead_spawn;
+       struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
@@ -45,12 +65,107 @@ struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
 };
 
+struct cryptd_hash_ctx {
+       struct crypto_shash *child;
+};
+
+struct cryptd_hash_request_ctx {
+       crypto_completion_t complete;
+       struct shash_desc desc;
+};
+
+struct cryptd_aead_ctx {
+       struct crypto_aead *child;
+};
+
+struct cryptd_aead_request_ctx {
+       crypto_completion_t complete;
+};
+
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+                            unsigned int max_cpu_qlen)
+{
+       int cpu;
+       struct cryptd_cpu_queue *cpu_queue;
+
+       queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+       if (!queue->cpu_queue)
+               return -ENOMEM;
+       for_each_possible_cpu(cpu) {
+               cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+               crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+               INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+       }
+       return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+       int cpu;
+       struct cryptd_cpu_queue *cpu_queue;
+
+       for_each_possible_cpu(cpu) {
+               cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+               BUG_ON(cpu_queue->queue.qlen);
+       }
+       free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+                                 struct crypto_async_request *request)
+{
+       int cpu, err;
+       struct cryptd_cpu_queue *cpu_queue;
+
+       cpu = get_cpu();
+       cpu_queue = this_cpu_ptr(queue->cpu_queue);
+       err = crypto_enqueue_request(&cpu_queue->queue, request);
+       queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+       put_cpu();
+
+       return err;
+}
+
+/*
+ * Called in workqueue context; does one unit of real crypto work (via
+ * req->complete) and reschedules itself if there is more work to do.
+ */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+       struct cryptd_cpu_queue *cpu_queue;
+       struct crypto_async_request *req, *backlog;
+
+       cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+       /*
+        * Only handle one request at a time to avoid hogging the crypto
+        * workqueue. preempt_disable/enable prevents this worker from
+        * being preempted by cryptd_enqueue_request() on the same CPU;
+        * local_bh_disable/enable prevents cryptd_enqueue_request() from
+        * running in softirq context while the queue is manipulated.
+        */
+       local_bh_disable();
+       preempt_disable();
+       backlog = crypto_get_backlog(&cpu_queue->queue);
+       req = crypto_dequeue_request(&cpu_queue->queue);
+       preempt_enable();
+       local_bh_enable();
+
+       if (!req)
+               return;
+
+       if (backlog)
+               backlog->complete(backlog, -EINPROGRESS);
+       req->complete(req, 0);
+
+       if (cpu_queue->queue.qlen)
+               queue_work(kcrypto_wq, &cpu_queue->work);
+}
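
cryptd_enqueue_request() pins a request to the submitting CPU's queue and schedules that CPU's work item on kcrypto_wq; cryptd_queue_worker() then dequeues one request per invocation and runs the real operation in process context. From the caller's side this is the usual asynchronous contract: submit, get -EINPROGRESS, and wait for the completion callback. A hedged caller-side sketch (every "sketch_" name is invented for illustration, not part of this patch):

/*
 * Minimal wait-for-completion plumbing an async caller typically pairs
 * with a cryptd tfm; the callback fires once the worker has finished
 * the real operation.
 */
struct sketch_result {
	struct completion completion;
	int err;
};

static void sketch_complete(struct crypto_async_request *req, int err)
{
	struct sketch_result *res = req->data;

	if (err == -EINPROGRESS)	/* left the backlog; keep waiting */
		return;
	res->err = err;
	complete(&res->completion);
}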
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 {
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-       return ictx->state;
+       return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -82,10 +197,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 
        rctx = ablkcipher_request_ctx(req);
 
-       if (unlikely(err == -EINPROGRESS)) {
-               rctx->complete(&req->base, err);
-               return;
-       }
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
 
        desc.tfm = child;
        desc.info = req->info;
@@ -95,8 +208,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 
        req->base.complete = rctx->complete;
 
+out:
        local_bh_disable();
-       req->base.complete(&req->base, err);
+       rctx->complete(&req->base, err);
        local_bh_enable();
 }
 
@@ -123,19 +237,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 {
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-       struct cryptd_state *state =
-               cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-       int err;
+       struct cryptd_queue *queue;
 
+       queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = complete;
 
-       spin_lock_bh(&state->lock);
-       err = ablkcipher_enqueue_request(&state->queue, req);
-       spin_unlock_bh(&state->lock);
-
-       wake_up_process(state->task);
-       return err;
+       return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -169,45 +277,28 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct cryptd_state *state = cryptd_get_state(tfm);
-       int active;
-
-       mutex_lock(&state->mutex);
-       active = ablkcipher_tfm_in_queue(&state->queue,
-                                        __crypto_ablkcipher_cast(tfm));
-       mutex_unlock(&state->mutex);
-
-       BUG_ON(active);
 
        crypto_free_blkcipher(ctx->child);
 }
 
-static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-                                                    struct cryptd_state *state)
+static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+                                  unsigned int tail)
 {
+       char *p;
        struct crypto_instance *inst;
-       struct cryptd_instance_ctx *ctx;
        int err;
 
-       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
-       if (!inst) {
-               inst = ERR_PTR(-ENOMEM);
-               goto out;
-       }
+       p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
+       if (!p)
+               return ERR_PTR(-ENOMEM);
+
+       inst = (void *)(p + head);
 
        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;
 
-       ctx = crypto_instance_ctx(inst);
-       err = crypto_init_spawn(&ctx->spawn, alg, inst,
-                               CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-       if (err)
-               goto out_free_inst;
-
-       ctx->state = state;
-
        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
        inst->alg.cra_priority = alg->cra_priority + 50;
@@ -215,29 +306,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
        inst->alg.cra_alignmask = alg->cra_alignmask;
 
 out:
-       return inst;
+       return p;
 
 out_free_inst:
-       kfree(inst);
-       inst = ERR_PTR(err);
+       kfree(p);
+       p = ERR_PTR(err);
        goto out;
 }
 
-static struct crypto_instance *cryptd_alloc_blkcipher(
-       struct rtattr **tb, struct cryptd_state *state)
+static int cryptd_create_blkcipher(struct crypto_template *tmpl,
+                                  struct rtattr **tb,
+                                  struct cryptd_queue *queue)
 {
+       struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
+       int err;
 
        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
-               return ERR_CAST(alg);
+               return PTR_ERR(alg);
 
-       inst = cryptd_alloc_instance(alg, state);
+       inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+       err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;
 
+       ctx = crypto_instance_ctx(inst);
+       ctx->queue = queue;
+
+       err = crypto_init_spawn(&ctx->spawn, alg, inst,
+                               CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+       if (err)
+               goto out_free_inst;
+
        inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ablkcipher_type;
 
@@ -256,124 +359,598 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
 
+       err = crypto_register_instance(tmpl, inst);
+       if (err) {
+               crypto_drop_spawn(&ctx->spawn);
+out_free_inst:
+               kfree(inst);
+       }
+
 out_put_alg:
        crypto_mod_put(alg);
-       return inst;
+       return err;
+}
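
With allocation and registration folded into the template's create hook, an instance such as "cryptd(cbc(aes))" is materialised the first time its name is requested. A minimal usage sketch ("cbc(aes)" is only an example underlying algorithm):

/* Looking the name up triggers cryptd_create() on first use. */
static struct crypto_ablkcipher *sketch_get_cryptd_cbc(void)
{
	return crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
}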
+
+static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+       struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+       struct crypto_shash_spawn *spawn = &ictx->spawn;
+       struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_shash *hash;
+
+       hash = crypto_spawn_shash(spawn);
+       if (IS_ERR(hash))
+               return PTR_ERR(hash);
+
+       ctx->child = hash;
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct cryptd_hash_request_ctx) +
+                                crypto_shash_descsize(hash));
+       return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+{
+       struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_shash(ctx->child);
+}
+
+static int cryptd_hash_setkey(struct crypto_ahash *parent,
+                              const u8 *key, unsigned int keylen)
+{
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
+       struct crypto_shash *child = ctx->child;
+       int err;
+
+       crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+       crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+                                     CRYPTO_TFM_REQ_MASK);
+       err = crypto_shash_setkey(child, key, keylen);
+       crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+                                      CRYPTO_TFM_RES_MASK);
+       return err;
+}
+
+static int cryptd_hash_enqueue(struct ahash_request *req,
+                               crypto_completion_t complete)
+{
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct cryptd_queue *queue =
+               cryptd_get_queue(crypto_ahash_tfm(tfm));
+
+       rctx->complete = req->base.complete;
+       req->base.complete = complete;
+
+       return cryptd_enqueue_request(queue, &req->base);
+}
+
+static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+{
+       struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+       struct crypto_shash *child = ctx->child;
+       struct ahash_request *req = ahash_request_cast(req_async);
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct shash_desc *desc = &rctx->desc;
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       desc->tfm = child;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_shash_init(desc);
+
+       req->base.complete = rctx->complete;
+
+out:
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
 }
 
-static struct cryptd_state state;
+static int cryptd_hash_init_enqueue(struct ahash_request *req)
+{
+       return cryptd_hash_enqueue(req, cryptd_hash_init);
+}
 
-static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
+static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+{
+       struct ahash_request *req = ahash_request_cast(req_async);
+       struct cryptd_hash_request_ctx *rctx;
+
+       rctx = ahash_request_ctx(req);
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       err = shash_ahash_update(req, &rctx->desc);
+
+       req->base.complete = rctx->complete;
+
+out:
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+}
+
+static int cryptd_hash_update_enqueue(struct ahash_request *req)
+{
+       return cryptd_hash_enqueue(req, cryptd_hash_update);
+}
+
+static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+{
+       struct ahash_request *req = ahash_request_cast(req_async);
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       err = crypto_shash_final(&rctx->desc, req->result);
+
+       req->base.complete = rctx->complete;
+
+out:
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+}
+
+static int cryptd_hash_final_enqueue(struct ahash_request *req)
+{
+       return cryptd_hash_enqueue(req, cryptd_hash_final);
+}
+
+static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
+{
+       struct ahash_request *req = ahash_request_cast(req_async);
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       err = shash_ahash_finup(req, &rctx->desc);
+
+       req->base.complete = rctx->complete;
+
+out:
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+}
+
+static int cryptd_hash_finup_enqueue(struct ahash_request *req)
+{
+       return cryptd_hash_enqueue(req, cryptd_hash_finup);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+       struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+       struct crypto_shash *child = ctx->child;
+       struct ahash_request *req = ahash_request_cast(req_async);
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct shash_desc *desc = &rctx->desc;
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+
+       desc->tfm = child;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = shash_ahash_digest(req, desc);
+
+       req->base.complete = rctx->complete;
+
+out:
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+}
+
+static int cryptd_hash_digest_enqueue(struct ahash_request *req)
+{
+       return cryptd_hash_enqueue(req, cryptd_hash_digest);
+}
+
+static int cryptd_hash_export(struct ahash_request *req, void *out)
+{
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+       return crypto_shash_export(&rctx->desc, out);
+}
+
+static int cryptd_hash_import(struct ahash_request *req, const void *in)
+{
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+       return crypto_shash_import(&rctx->desc, in);
+}
+
+static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+                             struct cryptd_queue *queue)
+{
+       struct hashd_instance_ctx *ctx;
+       struct ahash_instance *inst;
+       struct shash_alg *salg;
+       struct crypto_alg *alg;
+       int err;
+
+       salg = shash_attr_alg(tb[1], 0, 0);
+       if (IS_ERR(salg))
+               return PTR_ERR(salg);
+
+       alg = &salg->base;
+       inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
+                                    sizeof(*ctx));
+       err = PTR_ERR(inst);
+       if (IS_ERR(inst))
+               goto out_put_alg;
+
+       ctx = ahash_instance_ctx(inst);
+       ctx->queue = queue;
+
+       err = crypto_init_shash_spawn(&ctx->spawn, salg,
+                                     ahash_crypto_instance(inst));
+       if (err)
+               goto out_free_inst;
+
+       inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+
+       inst->alg.halg.digestsize = salg->digestsize;
+       inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+       inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+       inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+
+       inst->alg.init   = cryptd_hash_init_enqueue;
+       inst->alg.update = cryptd_hash_update_enqueue;
+       inst->alg.final  = cryptd_hash_final_enqueue;
+       inst->alg.finup  = cryptd_hash_finup_enqueue;
+       inst->alg.export = cryptd_hash_export;
+       inst->alg.import = cryptd_hash_import;
+       inst->alg.setkey = cryptd_hash_setkey;
+       inst->alg.digest = cryptd_hash_digest_enqueue;
+
+       err = ahash_register_instance(tmpl, inst);
+       if (err) {
+               crypto_drop_shash(&ctx->spawn);
+out_free_inst:
+               kfree(inst);
+       }
+
+out_put_alg:
+       crypto_mod_put(alg);
+       return err;
+}
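
The resulting hash instance defers init/update/final/finup/digest through the per-CPU queue, while export/import run synchronously on the shash_desc embedded in the request. A hedged sketch of one deferred digest, reusing the sketch_result/sketch_complete helpers sketched earlier (res->completion is assumed to have been set up with init_completion()):

static int sketch_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
			 unsigned int len, u8 *out, struct sketch_result *res)
{
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sketch_complete, res);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);		/* usually -EINPROGRESS here */
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res->completion);
		err = res->err;
	}
	ahash_request_free(req);
	return err;
}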
+
+static void cryptd_aead_crypt(struct aead_request *req,
+                              struct crypto_aead *child,
+                              int err,
+                              int (*crypt)(struct aead_request *req))
+{
+       struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+
+       if (unlikely(err == -EINPROGRESS))
+               goto out;
+       aead_request_set_tfm(req, child);
+       err = crypt(req);
+       req->base.complete = rctx->complete;
+out:
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+}
+
+static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+{
+       struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+       struct crypto_aead *child = ctx->child;
+       struct aead_request *req;
+
+       req = container_of(areq, struct aead_request, base);
+       cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+}
+
+static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+{
+       struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+       struct crypto_aead *child = ctx->child;
+       struct aead_request *req;
+
+       req = container_of(areq, struct aead_request, base);
+       cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+}
+
+static int cryptd_aead_enqueue(struct aead_request *req,
+                              crypto_completion_t complete)
+{
+       struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+
+       rctx->complete = req->base.complete;
+       req->base.complete = complete;
+       return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
+{
+       return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
+}
+
+static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
+{
+       return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
+}
+
+static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+       struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+       struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
+       struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_aead *cipher;
+
+       cipher = crypto_spawn_aead(spawn);
+       if (IS_ERR(cipher))
+               return PTR_ERR(cipher);
+
+       crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
+       ctx->child = cipher;
+       tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+       return 0;
+}
+
+static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+       struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_aead(ctx->child);
+}
+
+static int cryptd_create_aead(struct crypto_template *tmpl,
+                             struct rtattr **tb,
+                             struct cryptd_queue *queue)
+{
+       struct aead_instance_ctx *ctx;
+       struct crypto_instance *inst;
+       struct crypto_alg *alg;
+       int err;
+
+       alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
+                                 CRYPTO_ALG_TYPE_MASK);
+       if (IS_ERR(alg))
+               return PTR_ERR(alg);
+
+       inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+       err = PTR_ERR(inst);
+       if (IS_ERR(inst))
+               goto out_put_alg;
+
+       ctx = crypto_instance_ctx(inst);
+       ctx->queue = queue;
+
+       err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
+                       CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+       if (err)
+               goto out_free_inst;
+
+       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+       inst->alg.cra_type = alg->cra_type;
+       inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+       inst->alg.cra_init = cryptd_aead_init_tfm;
+       inst->alg.cra_exit = cryptd_aead_exit_tfm;
+       inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
+       inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
+       inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
+       inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
+       inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+       inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
+       inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
+       inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
+       inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;
+
+       err = crypto_register_instance(tmpl, inst);
+       if (err) {
+               crypto_drop_spawn(&ctx->aead_spawn.base);
+out_free_inst:
+               kfree(inst);
+       }
+out_put_alg:
+       crypto_mod_put(alg);
+       return err;
+}
+
+static struct cryptd_queue queue;
+
+static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
        struct crypto_attr_type *algt;
 
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
-               return ERR_CAST(algt);
+               return PTR_ERR(algt);
 
        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
-               return cryptd_alloc_blkcipher(tb, &state);
+               return cryptd_create_blkcipher(tmpl, tb, &queue);
+       case CRYPTO_ALG_TYPE_DIGEST:
+               return cryptd_create_hash(tmpl, tb, &queue);
+       case CRYPTO_ALG_TYPE_AEAD:
+               return cryptd_create_aead(tmpl, tb, &queue);
        }
 
-       return ERR_PTR(-EINVAL);
+       return -EINVAL;
 }
 
 static void cryptd_free(struct crypto_instance *inst)
 {
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
+       struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+       struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
 
-       crypto_drop_spawn(&ctx->spawn);
-       kfree(inst);
+       switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+       case CRYPTO_ALG_TYPE_AHASH:
+               crypto_drop_shash(&hctx->spawn);
+               kfree(ahash_instance(inst));
+               return;
+       case CRYPTO_ALG_TYPE_AEAD:
+               crypto_drop_spawn(&aead_ctx->aead_spawn.base);
+               kfree(inst);
+               return;
+       default:
+               crypto_drop_spawn(&ctx->spawn);
+               kfree(inst);
+       }
 }
 
 static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
-       .alloc = cryptd_alloc,
+       .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
 };
 
-static inline int cryptd_create_thread(struct cryptd_state *state,
-                                      int (*fn)(void *data), const char *name)
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+                                                 u32 type, u32 mask)
 {
-       spin_lock_init(&state->lock);
-       mutex_init(&state->mutex);
-       crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-       state->task = kthread_run(fn, state, name);
-       if (IS_ERR(state->task))
-               return PTR_ERR(state->task);
+       char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct crypto_tfm *tfm;
+
+       if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+                    "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+               return ERR_PTR(-EINVAL);
+       type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+       type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+       mask &= ~CRYPTO_ALG_TYPE_MASK;
+       mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
+       tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
+       if (IS_ERR(tfm))
+               return ERR_CAST(tfm);
+       if (tfm->__crt_alg->cra_module != THIS_MODULE) {
+               crypto_free_tfm(tfm);
+               return ERR_PTR(-EINVAL);
+       }
 
-       return 0;
+       return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
 }
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 {
-       BUG_ON(state->queue.qlen);
-       kthread_stop(state->task);
+       struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+       return ctx->child;
 }
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-       struct cryptd_state *state = data;
-       int stop;
+       crypto_free_ablkcipher(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
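
These exports let an arch driver wrap its internal synchronous cipher in cryptd while keeping direct access to the child for a fast path. A rough sketch of that pattern (aesni-intel style; "__sketch-alg" is a placeholder driver name, and cryptd_alloc_ablkcipher() itself prepends the "cryptd(...)" wrapper):

static int sketch_bind(struct cryptd_ablkcipher **out)
{
	struct cryptd_ablkcipher *ctfm;

	ctfm = cryptd_alloc_ablkcipher("__sketch-alg", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);
	/* cryptd_ablkcipher_child(ctfm) is the synchronous fast path */
	*out = ctfm;
	return 0;
}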
 
-       current->flags |= PF_NOFREEZE;
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+                                       u32 type, u32 mask)
+{
+       char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct crypto_ahash *tfm;
+
+       if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+                    "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+               return ERR_PTR(-EINVAL);
+       tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
+       if (IS_ERR(tfm))
+               return ERR_CAST(tfm);
+       if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+               crypto_free_ahash(tfm);
+               return ERR_PTR(-EINVAL);
+       }
 
-       do {
-               struct crypto_async_request *req, *backlog;
+       return __cryptd_ahash_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
 
-               mutex_lock(&state->mutex);
-               __set_current_state(TASK_INTERRUPTIBLE);
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
+{
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 
-               spin_lock_bh(&state->lock);
-               backlog = crypto_get_backlog(&state->queue);
-               req = crypto_dequeue_request(&state->queue);
-               spin_unlock_bh(&state->lock);
+       return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_child);
 
-               stop = kthread_should_stop();
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
+{
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+       return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(cryptd_shash_desc);
 
-               if (stop || req) {
-                       __set_current_state(TASK_RUNNING);
-                       if (req) {
-                               if (backlog)
-                                       backlog->complete(backlog,
-                                                         -EINPROGRESS);
-                               req->complete(req, 0);
-                       }
-               }
+void cryptd_free_ahash(struct cryptd_ahash *tfm)
+{
+       crypto_free_ahash(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_ahash);
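
cryptd_shash_desc() exposes the shash_desc embedded in an ahash_request, so a driver can run the child hash synchronously when its execution context allows and fall back to the queue otherwise. A sketch under that assumption ("can_run_sync" is a hypothetical predicate, e.g. irq_fpu_usable() in the SSE-based drivers):

static int sketch_hash_update(struct cryptd_ahash *ctfm,
			      struct ahash_request *req, bool can_run_sync)
{
	if (can_run_sync) {
		struct shash_desc *desc = cryptd_shash_desc(req);

		desc->tfm = cryptd_ahash_child(ctfm);
		desc->flags = req->base.flags;
		return shash_ahash_update(req, desc);	/* run child directly */
	}
	return crypto_ahash_update(req);	/* defer through cryptd */
}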
 
-               mutex_unlock(&state->mutex);
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+                                      u32 type, u32 mask)
+{
+       char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct crypto_aead *tfm;
+
+       if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+                    "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+               return ERR_PTR(-EINVAL);
+       tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
+       if (IS_ERR(tfm))
+               return ERR_CAST(tfm);
+       if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+               crypto_free_aead(tfm);
+               return ERR_PTR(-EINVAL);
+       }
+       return __cryptd_aead_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
 
-               schedule();
-       } while (!stop);
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
+{
+       struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+       return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_child);
 
-       return 0;
+void cryptd_free_aead(struct cryptd_aead *tfm)
+{
+       crypto_free_aead(&tfm->base);
 }
+EXPORT_SYMBOL_GPL(cryptd_free_aead);
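
The AEAD helpers mirror the ablkcipher and ahash sets. A hedged sketch of a driver binding a cryptd-wrapped AEAD and keeping the child for the direct path (aesni-intel RFC4106 style; "__sketch-gcm" is a placeholder name):

static int sketch_bind_aead(struct crypto_aead **child,
			    struct cryptd_aead **out)
{
	struct cryptd_aead *ctfm;

	ctfm = cryptd_alloc_aead("__sketch-gcm", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);
	*child = cryptd_aead_child(ctfm);	/* direct path when usable */
	*out = ctfm;
	return 0;
}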
 
 static int __init cryptd_init(void)
 {
        int err;
 
-       err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+       err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;
 
        err = crypto_register_template(&cryptd_tmpl);
        if (err)
-               kthread_stop(state.task);
+               cryptd_fini_queue(&queue);
 
        return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-       cryptd_stop_thread(&state);
+       cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
 }
 
-module_init(cryptd_init);
+subsys_initcall(cryptd_init);
 module_exit(cryptd_exit);
 
 MODULE_LICENSE("GPL");