crypto: caam - fix job ring cleanup code
drivers/crypto/caam/caamalg.c [linux-3.10.git]
1 /*
2  * caam - Freescale FSL CAAM support for crypto API
3  *
4  * Copyright 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Based on talitos crypto API driver.
7  *
8  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9  *
10  * ---------------                     ---------------
11  * | JobDesc #1  |-------------------->|  ShareDesc  |
12  * | *(packet 1) |                     |   (PDB)     |
13  * ---------------      |------------->|  (hashKey)  |
14  *       .              |              | (cipherKey) |
15  *       .              |    |-------->| (operation) |
16  * ---------------      |    |         ---------------
17  * | JobDesc #2  |------|    |
18  * | *(packet 2) |           |
19  * ---------------           |
20  *       .                   |
21  *       .                   |
22  * ---------------           |
23  * | JobDesc #3  |------------
24  * | *(packet 3) |
25  * ---------------
26  *
27  * The SharedDesc never changes for a connection unless rekeyed, but
28  * each packet will likely be in a different place. So all we need
29  * to know to process the packet is where the input is, where the
30  * output goes, and what context we want to process with. Context is
31  * in the SharedDesc, packet references in the JobDesc.
32  *
33  * So, a job desc looks like:
34  *
35  * ---------------------
36  * | Header            |
37  * | ShareDesc Pointer |
38  * | SEQ_OUT_PTR       |
39  * | (output buffer)   |
40  * | (output length)   |
41  * | SEQ_IN_PTR        |
42  * | (input buffer)    |
43  * | (input length)    |
44  * ---------------------
45  */
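/*
 * A minimal sketch (illustration only, not a routine in this driver) of how
 * a job descriptor of the shape above is assembled with the desc_constr.h
 * helpers used later in this file; sh_desc, sh_desc_dma, src_dma, dst_dma
 * and the two lengths are assumed to be supplied by the caller:
 *
 *	u32 jd[MAX_CAAM_DESCSIZE];
 *
 *	init_job_desc_shared(jd, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(jd, src_dma, in_len, 0);
 *	append_seq_out_ptr(jd, dst_dma, out_len, 0);
 *
 * init_aead_job() and init_ablkcipher_job() below follow the same pattern,
 * passing LDST_SGF in the options when a scatter/gather table is used
 * instead of a flat buffer.
 */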
46
47 #include "compat.h"
48
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54 #include "sg_sw_sec4.h"
55 #include "key_gen.h"
56
57 /*
58  * crypto alg
59  */
60 #define CAAM_CRA_PRIORITY               3000
61 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62 #define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
63                                          SHA512_DIGEST_SIZE * 2)
64 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65 #define CAAM_MAX_IV_LENGTH              16
66
67 /* length of descriptors text */
68 #define DESC_JOB_IO_LEN                 (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
69
70 #define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
71 #define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
72 #define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
73 #define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
74
75 #define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
76 #define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
77                                          20 * CAAM_CMD_SZ)
78 #define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
79                                          15 * CAAM_CMD_SZ)
80
81 #define DESC_MAX_USED_BYTES             (DESC_AEAD_GIVENC_LEN + \
82                                          CAAM_MAX_KEY_SIZE)
83 #define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
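/*
 * Worked example, assuming CAAM_CMD_SZ is 4 (one 32-bit command word):
 * DESC_AEAD_GIVENC_LEN = (4 + 16 + 7) * 4 = 108 bytes and CAAM_MAX_KEY_SIZE =
 * 32 + 64 * 2 = 160 bytes, so DESC_MAX_USED_BYTES = 268 and each sh_desc_*
 * buffer in struct caam_ctx below reserves DESC_MAX_USED_LEN = 67 words.
 */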
84
85 #ifdef DEBUG
86 /* for print_hex_dumps with line references */
87 #define xstr(s) str(s)
88 #define str(s) #s
89 #define debug(format, arg...) printk(format, arg)
90 #else
91 #define debug(format, arg...)
92 #endif
93
94 /* Set DK bit in class 1 operation if shared */
95 static inline void append_dec_op1(u32 *desc, u32 type)
96 {
97         u32 *jump_cmd, *uncond_jump_cmd;
98
99         jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
100         append_operation(desc, type | OP_ALG_AS_INITFINAL |
101                          OP_ALG_DECRYPT);
102         uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
103         set_jump_tgt_here(desc, jump_cmd);
104         append_operation(desc, type | OP_ALG_AS_INITFINAL |
105                          OP_ALG_DECRYPT | OP_ALG_AAI_DK);
106         set_jump_tgt_here(desc, uncond_jump_cmd);
107 }
108
109 /*
110  * Wait for completion of class 1 key loading before allowing
111  * error propagation
112  */
113 static inline void append_dec_shr_done(u32 *desc)
114 {
115         u32 *jump_cmd;
116
117         jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
118         set_jump_tgt_here(desc, jump_cmd);
119         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
120 }
121
122 /*
123  * For aead functions, read payload and write payload,
124  * both of which are specified in req->src and req->dst
125  */
126 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
127 {
128         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
129                              KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
130         append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
131 }
132
133 /*
134  * For aead encrypt and decrypt, read iv for both classes
135  */
136 static inline void aead_append_ld_iv(u32 *desc, int ivsize)
137 {
138         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
139                    LDST_CLASS_1_CCB | ivsize);
140         append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
141 }
142
143 /*
144  * For ablkcipher encrypt and decrypt, read from req->src and
145  * write to req->dst
146  */
147 static inline void ablkcipher_append_src_dst(u32 *desc)
148 {
149         append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
150         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
151         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
152                              KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
153         append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
154 }
155
156 /*
157  * If all data, including src (with assoc and iv) or dst (with iv only) are
158  * contiguous
159  */
160 #define GIV_SRC_CONTIG          1
161 #define GIV_DST_CONTIG          (1 << 1)
162
163 /*
164  * per-session context
165  */
166 struct caam_ctx {
167         struct device *jrdev;
168         u32 sh_desc_enc[DESC_MAX_USED_LEN];
169         u32 sh_desc_dec[DESC_MAX_USED_LEN];
170         u32 sh_desc_givenc[DESC_MAX_USED_LEN];
171         dma_addr_t sh_desc_enc_dma;
172         dma_addr_t sh_desc_dec_dma;
173         dma_addr_t sh_desc_givenc_dma;
174         u32 class1_alg_type;
175         u32 class2_alg_type;
176         u32 alg_op;
177         u8 key[CAAM_MAX_KEY_SIZE];
178         dma_addr_t key_dma;
179         unsigned int enckeylen;
180         unsigned int split_key_len;
181         unsigned int split_key_pad_len;
182         unsigned int authsize;
183 };
184
185 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
186                             int keys_fit_inline)
187 {
188         if (keys_fit_inline) {
189                 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
190                                   ctx->split_key_len, CLASS_2 |
191                                   KEY_DEST_MDHA_SPLIT | KEY_ENC);
192                 append_key_as_imm(desc, (void *)ctx->key +
193                                   ctx->split_key_pad_len, ctx->enckeylen,
194                                   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
195         } else {
196                 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
197                            KEY_DEST_MDHA_SPLIT | KEY_ENC);
198                 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
199                            ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
200         }
201 }
202
203 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
204                                   int keys_fit_inline)
205 {
206         u32 *key_jump_cmd;
207
208         init_sh_desc(desc, HDR_SHARE_SERIAL);
209
210         /* Skip if already shared */
211         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
212                                    JUMP_COND_SHRD);
213
214         append_key_aead(desc, ctx, keys_fit_inline);
215
216         set_jump_tgt_here(desc, key_jump_cmd);
217
218         /* Propagate errors from shared to job descriptor */
219         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
220 }
221
222 static int aead_set_sh_desc(struct crypto_aead *aead)
223 {
224         struct aead_tfm *tfm = &aead->base.crt_aead;
225         struct caam_ctx *ctx = crypto_aead_ctx(aead);
226         struct device *jrdev = ctx->jrdev;
227         bool keys_fit_inline = false;
228         u32 *key_jump_cmd, *jump_cmd;
229         u32 geniv, moveiv;
230         u32 *desc;
231
232         if (!ctx->enckeylen || !ctx->authsize)
233                 return 0;
234
235         /*
236          * Job Descriptor and Shared Descriptors
237          * must all fit into the 64-word Descriptor h/w Buffer
238          */
239         if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
240             ctx->split_key_pad_len + ctx->enckeylen <=
241             CAAM_DESC_BYTES_MAX)
242                 keys_fit_inline = true;
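            /*
             * For example, assuming CAAM_CMD_SZ == 4 and a 32-bit dma_addr_t
             * (i.e. DESC_JOB_IO_LEN == 32 bytes): DESC_AEAD_ENC_LEN is 80
             * bytes, so the padded split key plus the cipher key may occupy
             * up to 256 - 112 = 144 bytes of the 64-word buffer and still be
             * inlined, e.g. a SHA1 split key padded to 48 bytes plus an AES
             * key.
             */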
243
244         /* aead_encrypt shared descriptor */
245         desc = ctx->sh_desc_enc;
246
247         init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
248
249         /* Class 2 operation */
250         append_operation(desc, ctx->class2_alg_type |
251                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
252
253         /* cryptlen = seqoutlen - authsize */
254         append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
255
256         /* assoclen + cryptlen = seqinlen - ivsize */
257         append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
258
259         /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
260         append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
261
262         /* read assoc before reading payload */
263         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
264                              KEY_VLF);
265         aead_append_ld_iv(desc, tfm->ivsize);
266
267         /* Class 1 operation */
268         append_operation(desc, ctx->class1_alg_type |
269                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
270
271         /* Read and write cryptlen bytes */
272         append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
273         append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
274         aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
275
276         /* Write ICV */
277         append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
278                          LDST_SRCDST_BYTE_CONTEXT);
279
280         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
281                                               desc_bytes(desc),
282                                               DMA_TO_DEVICE);
283         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
284                 dev_err(jrdev, "unable to map shared descriptor\n");
285                 return -ENOMEM;
286         }
287 #ifdef DEBUG
288         print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
289                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
290                        desc_bytes(desc), 1);
291 #endif
292
293         /*
294          * Job Descriptor and Shared Descriptors
295          * must all fit into the 64-word Descriptor h/w Buffer
296          */
            keys_fit_inline = false;
297         if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
298             ctx->split_key_pad_len + ctx->enckeylen <=
299             CAAM_DESC_BYTES_MAX)
300                 keys_fit_inline = true;
301
302         desc = ctx->sh_desc_dec;
303
304         /* aead_decrypt shared descriptor */
305         init_sh_desc(desc, HDR_SHARE_SERIAL);
306
307         /* Skip if already shared */
308         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
309                                    JUMP_COND_SHRD);
310
311         append_key_aead(desc, ctx, keys_fit_inline);
312
313         /* Only propagate error immediately if shared */
314         jump_cmd = append_jump(desc, JUMP_TEST_ALL);
315         set_jump_tgt_here(desc, key_jump_cmd);
316         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
317         set_jump_tgt_here(desc, jump_cmd);
318
319         /* Class 2 operation */
320         append_operation(desc, ctx->class2_alg_type |
321                          OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
322
323         /* assoclen + cryptlen = seqinlen - ivsize - authsize */
324         append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
325                                 ctx->authsize + tfm->ivsize);
326         /* assoclen = (assoclen + cryptlen) - cryptlen */
327         append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
328         append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
329
330         /* read assoc before reading payload */
331         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
332                              KEY_VLF);
333
334         aead_append_ld_iv(desc, tfm->ivsize);
335
336         append_dec_op1(desc, ctx->class1_alg_type);
337
338         /* Read and write cryptlen bytes */
339         append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
340         append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
341         aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
342
343         /* Load ICV */
344         append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
345                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
346         append_dec_shr_done(desc);
347
348         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
349                                               desc_bytes(desc),
350                                               DMA_TO_DEVICE);
351         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
352                 dev_err(jrdev, "unable to map shared descriptor\n");
353                 return -ENOMEM;
354         }
355 #ifdef DEBUG
356         print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
357                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
358                        desc_bytes(desc), 1);
359 #endif
360
361         /*
362          * Job Descriptor and Shared Descriptors
363          * must all fit into the 64-word Descriptor h/w Buffer
364          */
            keys_fit_inline = false;
365         if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
366             ctx->split_key_pad_len + ctx->enckeylen <=
367             CAAM_DESC_BYTES_MAX)
368                 keys_fit_inline = true;
369
370         /* aead_givencrypt shared descriptor */
371         desc = ctx->sh_desc_givenc;
372
373         init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
374
375         /* Generate IV */
376         geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
377                 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
378                 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
379         append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
380                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
381         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
382         append_move(desc, MOVE_SRC_INFIFO |
383                     MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
384         append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
385
386         /* Copy generated IV from class 1 context to output fifo */
387         append_move(desc, MOVE_SRC_CLASS1CTX |
388                     MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
389
390         /* Return to encryption */
391         append_operation(desc, ctx->class2_alg_type |
392                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
393
394         /* ivsize + cryptlen = seqoutlen - authsize */
395         append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
396
397         /* assoclen = seqinlen - (ivsize + cryptlen) */
398         append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
399
400         /* read assoc before reading payload */
401         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
402                              KEY_VLF);
403
404         /* Copy iv from class 1 ctx to class 2 fifo */
405         moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
406                  NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
407         append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
408                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
409         append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
410                             LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
411
412         /* Class 1 operation */
413         append_operation(desc, ctx->class1_alg_type |
414                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
415
416         /* Will write ivsize + cryptlen */
417         append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
418
419         /* No need to reload iv */
420         append_seq_fifo_load(desc, tfm->ivsize,
421                              FIFOLD_CLASS_SKIP);
422
423         /* Will read cryptlen */
424         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
425         aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
426
427         /* Write ICV */
428         append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
429                          LDST_SRCDST_BYTE_CONTEXT);
430
431         ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
432                                                  desc_bytes(desc),
433                                                  DMA_TO_DEVICE);
434         if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
435                 dev_err(jrdev, "unable to map shared descriptor\n");
436                 return -ENOMEM;
437         }
438 #ifdef DEBUG
439         print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
440                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
441                        desc_bytes(desc), 1);
442 #endif
443
444         return 0;
445 }
446
447 static int aead_setauthsize(struct crypto_aead *authenc,
448                                     unsigned int authsize)
449 {
450         struct caam_ctx *ctx = crypto_aead_ctx(authenc);
451
452         ctx->authsize = authsize;
453         aead_set_sh_desc(authenc);
454
455         return 0;
456 }
457
458 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
459                               u32 authkeylen)
460 {
461         return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
462                                ctx->split_key_pad_len, key_in, authkeylen,
463                                ctx->alg_op);
464 }
465
466 static int aead_setkey(struct crypto_aead *aead,
467                                const u8 *key, unsigned int keylen)
468 {
469         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
470         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
471         struct caam_ctx *ctx = crypto_aead_ctx(aead);
472         struct device *jrdev = ctx->jrdev;
473         struct rtattr *rta = (void *)key;
474         struct crypto_authenc_key_param *param;
475         unsigned int authkeylen;
476         unsigned int enckeylen;
477         int ret = 0;
478
479         param = RTA_DATA(rta);
480         enckeylen = be32_to_cpu(param->enckeylen);
481
482         key += RTA_ALIGN(rta->rta_len);
483         keylen -= RTA_ALIGN(rta->rta_len);
484
485         if (keylen < enckeylen)
486                 goto badkey;
487
488         authkeylen = keylen - enckeylen;
489
490         if (keylen > CAAM_MAX_KEY_SIZE)
491                 goto badkey;
492
493         /* Pick class 2 key length from algorithm submask */
494         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
495                                       OP_ALG_ALGSEL_SHIFT] * 2;
496         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
497
498 #ifdef DEBUG
499         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
500                keylen, enckeylen, authkeylen);
501         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
502                ctx->split_key_len, ctx->split_key_pad_len);
503         print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
504                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
505 #endif
506
507         ret = gen_split_aead_key(ctx, key, authkeylen);
508         if (ret) {
509                 goto badkey;
510         }
511
512         /* append encryption key after the auth split key */
513         memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
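            /*
             * Resulting ctx->key layout (a sketch; offsets are into the
             * single buffer mapped to key_dma just below):
             *
             *   offset 0                 : MDHA split key (split_key_len bytes)
             *   offset split_key_len     : padding up to split_key_pad_len
             *   offset split_key_pad_len : encryption key (enckeylen bytes)
             *
             * append_key_aead() points the class 2 KEY command at offset 0
             * and the class 1 KEY command at offset split_key_pad_len, so it
             * relies on exactly this layout.
             */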
514
515         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
516                                        enckeylen, DMA_TO_DEVICE);
517         if (dma_mapping_error(jrdev, ctx->key_dma)) {
518                 dev_err(jrdev, "unable to map key i/o memory\n");
519                 return -ENOMEM;
520         }
521 #ifdef DEBUG
522         print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
523                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
524                        ctx->split_key_pad_len + enckeylen, 1);
525 #endif
526
527         ctx->enckeylen = enckeylen;
528
529         ret = aead_set_sh_desc(aead);
530         if (ret) {
531                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
532                                  enckeylen, DMA_TO_DEVICE);
533         }
534
535         return ret;
536 badkey:
537         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
538         return -EINVAL;
539 }
540
541 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
542                              const u8 *key, unsigned int keylen)
543 {
544         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
545         struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
546         struct device *jrdev = ctx->jrdev;
547         int ret = 0;
548         u32 *key_jump_cmd, *jump_cmd;
549         u32 *desc;
550
551 #ifdef DEBUG
552         print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
553                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
554 #endif
555
556         memcpy(ctx->key, key, keylen);
557         ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
558                                       DMA_TO_DEVICE);
559         if (dma_mapping_error(jrdev, ctx->key_dma)) {
560                 dev_err(jrdev, "unable to map key i/o memory\n");
561                 return -ENOMEM;
562         }
563         ctx->enckeylen = keylen;
564
565         /* ablkcipher_encrypt shared descriptor */
566         desc = ctx->sh_desc_enc;
567         init_sh_desc(desc, HDR_SHARE_SERIAL);
568         /* Skip if already shared */
569         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
570                                    JUMP_COND_SHRD);
571
572         /* Load class1 key only */
573         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
574                           ctx->enckeylen, CLASS_1 |
575                           KEY_DEST_CLASS_REG);
576
577         set_jump_tgt_here(desc, key_jump_cmd);
578
579         /* Propagate errors from shared to job descriptor */
580         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
581
582         /* Load iv */
583         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
584                    LDST_CLASS_1_CCB | tfm->ivsize);
585
586         /* Load operation */
587         append_operation(desc, ctx->class1_alg_type |
588                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
589
590         /* Perform operation */
591         ablkcipher_append_src_dst(desc);
592
593         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
594                                               desc_bytes(desc),
595                                               DMA_TO_DEVICE);
596         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
597                 dev_err(jrdev, "unable to map shared descriptor\n");
598                 return -ENOMEM;
599         }
600 #ifdef DEBUG
601         print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
602                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
603                        desc_bytes(desc), 1);
604 #endif
605         /* ablkcipher_decrypt shared descriptor */
606         desc = ctx->sh_desc_dec;
607
608         init_sh_desc(desc, HDR_SHARE_SERIAL);
609         /* Skip if already shared */
610         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
611                                    JUMP_COND_SHRD);
612
613         /* Load class1 key only */
614         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
615                           ctx->enckeylen, CLASS_1 |
616                           KEY_DEST_CLASS_REG);
617
618         /* Only propagate error immediately if shared */
619         jump_cmd = append_jump(desc, JUMP_TEST_ALL);
620         set_jump_tgt_here(desc, key_jump_cmd);
621         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
622         set_jump_tgt_here(desc, jump_cmd);
623
624         /* load IV */
625         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
626                    LDST_CLASS_1_CCB | tfm->ivsize);
627
628         /* Choose operation */
629         append_dec_op1(desc, ctx->class1_alg_type);
630
631         /* Perform operation */
632         ablkcipher_append_src_dst(desc);
633
634         /* Wait for key to load before allowing error propagation */
635         append_dec_shr_done(desc);
636
637         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
638                                               desc_bytes(desc),
639                                               DMA_TO_DEVICE);
640         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
641                 dev_err(jrdev, "unable to map shared descriptor\n");
642                 return -ENOMEM;
643         }
644
645 #ifdef DEBUG
646         print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
647                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
648                        desc_bytes(desc), 1);
649 #endif
650
651         return ret;
652 }
653
654 /*
655  * aead_edesc - s/w-extended aead descriptor
656  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
657  * @assoc_chained: if associated data is chained
658  * @src_nents: number of segments in input scatterlist
659  * @src_chained: if source is chained
660  * @dst_nents: number of segments in output scatterlist
661  * @dst_chained: if destination is chained
662  * @iv_dma: dma address of iv for checking continuity and link table
663  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
664  * @sec4_sg_bytes: length of dma mapped sec4_sg space
665  * @sec4_sg_dma: bus physical mapped address of h/w link table
666  * @hw_desc: the h/w job descriptor followed by any referenced link tables
667  */
668 struct aead_edesc {
669         int assoc_nents;
670         bool assoc_chained;
671         int src_nents;
672         bool src_chained;
673         int dst_nents;
674         bool dst_chained;
675         dma_addr_t iv_dma;
676         int sec4_sg_bytes;
677         dma_addr_t sec4_sg_dma;
678         struct sec4_sg_entry *sec4_sg;
679         u32 hw_desc[0];
680 };
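/*
 * A sketch of how a single aead_edesc allocation is laid out in memory (see
 * aead_edesc_alloc() below; desc_bytes is the job descriptor space requested
 * by the caller):
 *
 *   +------------------------+  <- kmalloc'ed struct aead_edesc *
 *   | bookkeeping fields     |
 *   | hw_desc[] (desc_bytes) |  job descriptor built by init_aead_job()
 *   +------------------------+  <- edesc->sec4_sg
 *   | sec4_sg link table     |  sec4_sg_bytes, DMA mapped to sec4_sg_dma
 *   +------------------------+
 */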
681
682 /*
683  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
684  * @src_nents: number of segments in input scatterlist
685  * @src_chained: if source is chained
686  * @dst_nents: number of segments in output scatterlist
687  * @dst_chained: if destination is chained
688  * @iv_dma: dma address of iv for checking continuity and link table
689  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
690  * @sec4_sg_bytes: length of dma mapped sec4_sg space
691  * @sec4_sg_dma: bus physical mapped address of h/w link table
692  * @hw_desc: the h/w job descriptor followed by any referenced link tables
693  */
694 struct ablkcipher_edesc {
695         int src_nents;
696         bool src_chained;
697         int dst_nents;
698         bool dst_chained;
699         dma_addr_t iv_dma;
700         int sec4_sg_bytes;
701         dma_addr_t sec4_sg_dma;
702         struct sec4_sg_entry *sec4_sg;
703         u32 hw_desc[0];
704 };
705
706 static void caam_unmap(struct device *dev, struct scatterlist *src,
707                        struct scatterlist *dst, int src_nents,
708                        bool src_chained, int dst_nents, bool dst_chained,
709                        dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
710                        int sec4_sg_bytes)
711 {
712         if (dst != src) {
713                 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
714                                      src_chained);
715                 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
716                                      dst_chained);
717         } else {
718                 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
719                                      DMA_BIDIRECTIONAL, src_chained);
720         }
721
722         if (iv_dma)
723                 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
724         if (sec4_sg_bytes)
725                 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
726                                  DMA_TO_DEVICE);
727 }
728
729 static void aead_unmap(struct device *dev,
730                        struct aead_edesc *edesc,
731                        struct aead_request *req)
732 {
733         struct crypto_aead *aead = crypto_aead_reqtfm(req);
734         int ivsize = crypto_aead_ivsize(aead);
735
736         dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
737                              DMA_TO_DEVICE, edesc->assoc_chained);
738
739         caam_unmap(dev, req->src, req->dst,
740                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
741                    edesc->dst_chained, edesc->iv_dma, ivsize,
742                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
743 }
744
745 static void ablkcipher_unmap(struct device *dev,
746                              struct ablkcipher_edesc *edesc,
747                              struct ablkcipher_request *req)
748 {
749         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
750         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
751
752         caam_unmap(dev, req->src, req->dst,
753                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
754                    edesc->dst_chained, edesc->iv_dma, ivsize,
755                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
756 }
757
758 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
759                                    void *context)
760 {
761         struct aead_request *req = context;
762         struct aead_edesc *edesc;
763 #ifdef DEBUG
764         struct crypto_aead *aead = crypto_aead_reqtfm(req);
765         struct caam_ctx *ctx = crypto_aead_ctx(aead);
766         int ivsize = crypto_aead_ivsize(aead);
767
768         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
769 #endif
770
771         edesc = (struct aead_edesc *)((char *)desc -
772                  offsetof(struct aead_edesc, hw_desc));
773
774         if (err) {
775                 char tmp[CAAM_ERROR_STR_MAX];
776
777                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
778         }
779
780         aead_unmap(jrdev, edesc, req);
781
782 #ifdef DEBUG
783         print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
784                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
785                        req->assoclen , 1);
786         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
787                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
788                        edesc->src_nents ? 100 : ivsize, 1);
789         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
790                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
791                        edesc->src_nents ? 100 : req->cryptlen +
792                        ctx->authsize + 4, 1);
793 #endif
794
795         kfree(edesc);
796
797         aead_request_complete(req, err);
798 }
799
800 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
801                                    void *context)
802 {
803         struct aead_request *req = context;
804         struct aead_edesc *edesc;
805 #ifdef DEBUG
806         struct crypto_aead *aead = crypto_aead_reqtfm(req);
807         struct caam_ctx *ctx = crypto_aead_ctx(aead);
808         int ivsize = crypto_aead_ivsize(aead);
809
810         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
811 #endif
812
813         edesc = (struct aead_edesc *)((char *)desc -
814                  offsetof(struct aead_edesc, hw_desc));
815
816 #ifdef DEBUG
817         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
818                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
819                        ivsize, 1);
820         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
821                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
822                        req->cryptlen, 1);
823 #endif
824
825         if (err) {
826                 char tmp[CAAM_ERROR_STR_MAX];
827
828                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
829         }
830
831         aead_unmap(jrdev, edesc, req);
832
833         /*
834          * verify hw auth check passed else return -EBADMSG
835          */
836         if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
837                 err = -EBADMSG;
838
839 #ifdef DEBUG
840         print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
841                        DUMP_PREFIX_ADDRESS, 16, 4,
842                        ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
843                        sizeof(struct iphdr) + req->assoclen +
844                        ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
845                        ctx->authsize + 36, 1);
846         if (!err && edesc->sec4_sg_bytes) {
847                 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
848                 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
849                                DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
850                         sg->length + ctx->authsize + 16, 1);
851         }
852 #endif
853
854         kfree(edesc);
855
856         aead_request_complete(req, err);
857 }
858
859 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
860                                    void *context)
861 {
862         struct ablkcipher_request *req = context;
863         struct ablkcipher_edesc *edesc;
864 #ifdef DEBUG
865         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
866         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
867
868         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
869 #endif
870
871         edesc = (struct ablkcipher_edesc *)((char *)desc -
872                  offsetof(struct ablkcipher_edesc, hw_desc));
873
874         if (err) {
875                 char tmp[CAAM_ERROR_STR_MAX];
876
877                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
878         }
879
880 #ifdef DEBUG
881         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
882                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
883                        edesc->src_nents > 1 ? 100 : ivsize, 1);
884         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
885                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
886                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
887 #endif
888
889         ablkcipher_unmap(jrdev, edesc, req);
890         kfree(edesc);
891
892         ablkcipher_request_complete(req, err);
893 }
894
895 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
896                                     void *context)
897 {
898         struct ablkcipher_request *req = context;
899         struct ablkcipher_edesc *edesc;
900 #ifdef DEBUG
901         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
902         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
903
904         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
905 #endif
906
907         edesc = (struct ablkcipher_edesc *)((char *)desc -
908                  offsetof(struct ablkcipher_edesc, hw_desc));
909         if (err) {
910                 char tmp[CAAM_ERROR_STR_MAX];
911
912                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
913         }
914
915 #ifdef DEBUG
916         print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
917                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
918                        ivsize, 1);
919         print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
920                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
921                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
922 #endif
923
924         ablkcipher_unmap(jrdev, edesc, req);
925         kfree(edesc);
926
927         ablkcipher_request_complete(req, err);
928 }
929
930 /*
931  * Fill in aead job descriptor
932  */
933 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
934                           struct aead_edesc *edesc,
935                           struct aead_request *req,
936                           bool all_contig, bool encrypt)
937 {
938         struct crypto_aead *aead = crypto_aead_reqtfm(req);
939         struct caam_ctx *ctx = crypto_aead_ctx(aead);
940         int ivsize = crypto_aead_ivsize(aead);
941         int authsize = ctx->authsize;
942         u32 *desc = edesc->hw_desc;
943         u32 out_options = 0, in_options;
944         dma_addr_t dst_dma, src_dma;
945         int len, sec4_sg_index = 0;
946
947 #ifdef DEBUG
948         debug("assoclen %d cryptlen %d authsize %d\n",
949               req->assoclen, req->cryptlen, authsize);
950         print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
951                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
952                        req->assoclen , 1);
953         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
954                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
955                        edesc->src_nents ? 100 : ivsize, 1);
956         print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
957                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
958                         edesc->src_nents ? 100 : req->cryptlen, 1);
959         print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
960                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
961                        desc_bytes(sh_desc), 1);
962 #endif
963
964         len = desc_len(sh_desc);
965         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
966
967         if (all_contig) {
968                 src_dma = sg_dma_address(req->assoc);
969                 in_options = 0;
970         } else {
971                 src_dma = edesc->sec4_sg_dma;
972                 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
973                                  (edesc->src_nents ? : 1);
974                 in_options = LDST_SGF;
975         }
976         if (encrypt)
977                 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
978                                   req->cryptlen - authsize, in_options);
979         else
980                 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
981                                   req->cryptlen, in_options);
982
983         if (likely(req->src == req->dst)) {
984                 if (all_contig) {
985                         dst_dma = sg_dma_address(req->src);
986                 } else {
987                         dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
988                                   ((edesc->assoc_nents ? : 1) + 1);
989                         out_options = LDST_SGF;
990                 }
991         } else {
992                 if (!edesc->dst_nents) {
993                         dst_dma = sg_dma_address(req->dst);
994                 } else {
995                         dst_dma = edesc->sec4_sg_dma +
996                                   sec4_sg_index *
997                                   sizeof(struct sec4_sg_entry);
998                         out_options = LDST_SGF;
999                 }
1000         }
1001         if (encrypt)
1002                 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
1003         else
1004                 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1005                                    out_options);
1006 }
1007
1008 /*
1009  * Fill in aead givencrypt job descriptor
1010  */
1011 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1012                               struct aead_edesc *edesc,
1013                               struct aead_request *req,
1014                               int contig)
1015 {
1016         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1017         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1018         int ivsize = crypto_aead_ivsize(aead);
1019         int authsize = ctx->authsize;
1020         u32 *desc = edesc->hw_desc;
1021         u32 out_options = 0, in_options;
1022         dma_addr_t dst_dma, src_dma;
1023         int len, sec4_sg_index = 0;
1024
1025 #ifdef DEBUG
1026         debug("assoclen %d cryptlen %d authsize %d\n",
1027               req->assoclen, req->cryptlen, authsize);
1028         print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
1029                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1030                        req->assoclen , 1);
1031         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1032                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1033         print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1034                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1035                         edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1036         print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
1037                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1038                        desc_bytes(sh_desc), 1);
1039 #endif
1040
1041         len = desc_len(sh_desc);
1042         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1043
1044         if (contig & GIV_SRC_CONTIG) {
1045                 src_dma = sg_dma_address(req->assoc);
1046                 in_options = 0;
1047         } else {
1048                 src_dma = edesc->sec4_sg_dma;
1049                 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1050                 in_options = LDST_SGF;
1051         }
1052         append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1053                           req->cryptlen - authsize, in_options);
1054
1055         if (contig & GIV_DST_CONTIG) {
1056                 dst_dma = edesc->iv_dma;
1057         } else {
1058                 if (likely(req->src == req->dst)) {
1059                         dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1060                                   edesc->assoc_nents;
1061                         out_options = LDST_SGF;
1062                 } else {
1063                         dst_dma = edesc->sec4_sg_dma +
1064                                   sec4_sg_index *
1065                                   sizeof(struct sec4_sg_entry);
1066                         out_options = LDST_SGF;
1067                 }
1068         }
1069
1070         append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
1071 }
1072
1073 /*
1074  * Fill in ablkcipher job descriptor
1075  */
1076 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1077                                 struct ablkcipher_edesc *edesc,
1078                                 struct ablkcipher_request *req,
1079                                 bool iv_contig)
1080 {
1081         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1082         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1083         u32 *desc = edesc->hw_desc;
1084         u32 out_options = 0, in_options;
1085         dma_addr_t dst_dma, src_dma;
1086         int len, sec4_sg_index = 0;
1087
1088 #ifdef DEBUG
1089         print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1090                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1091                        ivsize, 1);
1092         print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1093                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1094                        edesc->src_nents ? 100 : req->nbytes, 1);
1095 #endif
1096
1097         len = desc_len(sh_desc);
1098         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1099
1100         if (iv_contig) {
1101                 src_dma = edesc->iv_dma;
1102                 in_options = 0;
1103         } else {
1104                 src_dma = edesc->sec4_sg_dma;
1105                 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1106                 in_options = LDST_SGF;
1107         }
1108         append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1109
1110         if (likely(req->src == req->dst)) {
1111                 if (!edesc->src_nents && iv_contig) {
1112                         dst_dma = sg_dma_address(req->src);
1113                 } else {
1114                         dst_dma = edesc->sec4_sg_dma +
1115                                 sizeof(struct sec4_sg_entry);
1116                         out_options = LDST_SGF;
1117                 }
1118         } else {
1119                 if (!edesc->dst_nents) {
1120                         dst_dma = sg_dma_address(req->dst);
1121                 } else {
1122                         dst_dma = edesc->sec4_sg_dma +
1123                                 sec4_sg_index * sizeof(struct sec4_sg_entry);
1124                         out_options = LDST_SGF;
1125                 }
1126         }
1127         append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1128 }
1129
1130 /*
1131  * allocate and map the aead extended descriptor
1132  */
1133 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1134                                            int desc_bytes, bool *all_contig_ptr)
1135 {
1136         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1137         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1138         struct device *jrdev = ctx->jrdev;
1139         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1140                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1141         int assoc_nents, src_nents, dst_nents = 0;
1142         struct aead_edesc *edesc;
1143         dma_addr_t iv_dma = 0;
1144         int sgc;
1145         bool all_contig = true;
1146         bool assoc_chained = false, src_chained = false, dst_chained = false;
1147         int ivsize = crypto_aead_ivsize(aead);
1148         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1149
1150         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1151         src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1152
1153         if (unlikely(req->dst != req->src))
1154                 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1155
1156         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1157                                  DMA_BIDIRECTIONAL, assoc_chained);
1158         if (likely(req->src == req->dst)) {
1159                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1160                                          DMA_BIDIRECTIONAL, src_chained);
1161         } else {
1162                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1163                                          DMA_TO_DEVICE, src_chained);
1164                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1165                                          DMA_FROM_DEVICE, dst_chained);
1166         }
1167
1168         /* Check if data are contiguous */
1169         iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1170         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1171             iv_dma || src_nents || iv_dma + ivsize !=
1172             sg_dma_address(req->src)) {
1173                 all_contig = false;
1174                 assoc_nents = assoc_nents ? : 1;
1175                 src_nents = src_nents ? : 1;
1176                 sec4_sg_len = assoc_nents + 1 + src_nents;
1177         }
1178         sec4_sg_len += dst_nents;
1179
1180         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1181
1182         /* allocate space for base edesc and hw desc commands, link tables */
1183         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1184                         sec4_sg_bytes, GFP_DMA | flags);
1185         if (!edesc) {
1186                 dev_err(jrdev, "could not allocate extended descriptor\n");
1187                 return ERR_PTR(-ENOMEM);
1188         }
1189
1190         edesc->assoc_nents = assoc_nents;
1191         edesc->assoc_chained = assoc_chained;
1192         edesc->src_nents = src_nents;
1193         edesc->src_chained = src_chained;
1194         edesc->dst_nents = dst_nents;
1195         edesc->dst_chained = dst_chained;
1196         edesc->iv_dma = iv_dma;
1197         edesc->sec4_sg_bytes = sec4_sg_bytes;
1198         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1199                          desc_bytes;
1200         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1201                                             sec4_sg_bytes, DMA_TO_DEVICE);
1202         *all_contig_ptr = all_contig;
1203
1204         sec4_sg_index = 0;
1205         if (!all_contig) {
1206                 sg_to_sec4_sg(req->assoc,
1207                               (assoc_nents ? : 1),
1208                               edesc->sec4_sg +
1209                               sec4_sg_index, 0);
1210                 sec4_sg_index += assoc_nents ? : 1;
1211                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1212                                    iv_dma, ivsize, 0);
1213                 sec4_sg_index += 1;
1214                 sg_to_sec4_sg_last(req->src,
1215                                    (src_nents ? : 1),
1216                                    edesc->sec4_sg +
1217                                    sec4_sg_index, 0);
1218                 sec4_sg_index += src_nents ? : 1;
1219         }
1220         if (dst_nents) {
1221                 sg_to_sec4_sg_last(req->dst, dst_nents,
1222                                    edesc->sec4_sg + sec4_sg_index, 0);
1223         }
1224
1225         return edesc;
1226 }
1227
1228 static int aead_encrypt(struct aead_request *req)
1229 {
1230         struct aead_edesc *edesc;
1231         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1232         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1233         struct device *jrdev = ctx->jrdev;
1234         bool all_contig;
1235         u32 *desc;
1236         int ret = 0;
1237
1238         req->cryptlen += ctx->authsize;
1239
1240         /* allocate extended descriptor */
1241         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1242                                  CAAM_CMD_SZ, &all_contig);
1243         if (IS_ERR(edesc))
1244                 return PTR_ERR(edesc);
1245
1246         /* Create and submit job descriptor */
1247         init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1248                       all_contig, true);
1249 #ifdef DEBUG
1250         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1251                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1252                        desc_bytes(edesc->hw_desc), 1);
1253 #endif
1254
1255         desc = edesc->hw_desc;
1256         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1257         if (!ret) {
1258                 ret = -EINPROGRESS;
1259         } else {
1260                 aead_unmap(jrdev, edesc, req);
1261                 kfree(edesc);
1262         }
1263
1264         return ret;
1265 }
1266
1267 static int aead_decrypt(struct aead_request *req)
1268 {
1269         struct aead_edesc *edesc;
1270         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1271         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1272         struct device *jrdev = ctx->jrdev;
1273         bool all_contig;
1274         u32 *desc;
1275         int ret = 0;
1276
1277         /* allocate extended descriptor */
1278         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1279                                  CAAM_CMD_SZ, &all_contig);
1280         if (IS_ERR(edesc))
1281                 return PTR_ERR(edesc);
1282
1283 #ifdef DEBUG
1284         print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
1285                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1286                        req->cryptlen, 1);
1287 #endif
1288
1289         /* Create and submit job descriptor */
1290         init_aead_job(ctx->sh_desc_dec,
1291                       ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1292 #ifdef DEBUG
1293         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1294                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1295                        desc_bytes(edesc->hw_desc), 1);
1296 #endif
1297
1298         desc = edesc->hw_desc;
1299         ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1300         if (!ret) {
1301                 ret = -EINPROGRESS;
1302         } else {
1303                 aead_unmap(jrdev, edesc, req);
1304                 kfree(edesc);
1305         }
1306
1307         return ret;
1308 }
1309
1310 /*
1311  * allocate and map the aead extended descriptor for aead givencrypt
1312  */
1313 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1314                                                *greq, int desc_bytes,
1315                                                u32 *contig_ptr)
1316 {
1317         struct aead_request *req = &greq->areq;
1318         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1319         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1320         struct device *jrdev = ctx->jrdev;
1321         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1322                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1323         int assoc_nents, src_nents, dst_nents = 0;
1324         struct aead_edesc *edesc;
1325         dma_addr_t iv_dma = 0;
1326         int sgc;
1327         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1328         int ivsize = crypto_aead_ivsize(aead);
1329         bool assoc_chained = false, src_chained = false, dst_chained = false;
1330         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1331
1332         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1333         src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1334
1335         if (unlikely(req->dst != req->src))
1336                 dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1337
1338         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1339                                  DMA_BIDIRECTIONAL, assoc_chained);
1340         if (likely(req->src == req->dst)) {
1341                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1342                                          DMA_BIDIRECTIONAL, src_chained);
1343         } else {
1344                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1345                                          DMA_TO_DEVICE, src_chained);
1346                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1347                                          DMA_FROM_DEVICE, dst_chained);
1348         }
1349
1350         /* Check if data are contiguous */
1351         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1352         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1353             iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1354                 contig &= ~GIV_SRC_CONTIG;
1355         if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1356                 contig &= ~GIV_DST_CONTIG;
1357         if (unlikely(req->src != req->dst)) {
1358                 dst_nents = dst_nents ? : 1;
1359                 sec4_sg_len += 1;
1360         }
1361         if (!(contig & GIV_SRC_CONTIG)) {
1362                 assoc_nents = assoc_nents ? : 1;
1363                 src_nents = src_nents ? : 1;
1364                 sec4_sg_len += assoc_nents + 1 + src_nents;
1365                 if (likely(req->src == req->dst))
1366                         contig &= ~GIV_DST_CONTIG;
1367         }
1368         sec4_sg_len += dst_nents;
1369
1370         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1371
1372         /* allocate space for base edesc and hw desc commands, link tables */
1373         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1374                         sec4_sg_bytes, GFP_DMA | flags);
1375         if (!edesc) {
1376                 dev_err(jrdev, "could not allocate extended descriptor\n");
1377                 return ERR_PTR(-ENOMEM);
1378         }
1379
1380         edesc->assoc_nents = assoc_nents;
1381         edesc->assoc_chained = assoc_chained;
1382         edesc->src_nents = src_nents;
1383         edesc->src_chained = src_chained;
1384         edesc->dst_nents = dst_nents;
1385         edesc->dst_chained = dst_chained;
1386         edesc->iv_dma = iv_dma;
1387         edesc->sec4_sg_bytes = sec4_sg_bytes;
1388         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1389                          desc_bytes;
1390         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1391                                             sec4_sg_bytes, DMA_TO_DEVICE);
1392         *contig_ptr = contig;
1393
1394         sec4_sg_index = 0;
1395         if (!(contig & GIV_SRC_CONTIG)) {
1396                 sg_to_sec4_sg(req->assoc, assoc_nents,
1397                               edesc->sec4_sg +
1398                               sec4_sg_index, 0);
1399                 sec4_sg_index += assoc_nents;
1400                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1401                                    iv_dma, ivsize, 0);
1402                 sec4_sg_index += 1;
1403                 sg_to_sec4_sg_last(req->src, src_nents,
1404                                    edesc->sec4_sg +
1405                                    sec4_sg_index, 0);
1406                 sec4_sg_index += src_nents;
1407         }
1408         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1409                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1410                                    iv_dma, ivsize, 0);
1411                 sec4_sg_index += 1;
1412                 sg_to_sec4_sg_last(req->dst, dst_nents,
1413                                    edesc->sec4_sg + sec4_sg_index, 0);
1414         }
1415
1416         return edesc;
1417 }
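
/*
 * Illustrative sketch only, not part of the original driver: the
 * source-side contiguity test applied in aead_giv_edesc_alloc() above.
 * The source can be handed to CAAM without a sec4 S/G table only when
 * assoc data, generated IV and payload each occupy a single segment and
 * sit back to back in bus address space (sg_count() returns 0 for a
 * single, unchained segment). The helper name is hypothetical.
 */
static inline bool caam_example_giv_src_contig(struct scatterlist *assoc,
                                               unsigned int assoclen,
                                               int assoc_nents,
                                               dma_addr_t iv_dma, int ivsize,
                                               struct scatterlist *src,
                                               int src_nents)
{
        return !assoc_nents && !src_nents &&
               sg_dma_address(assoc) + assoclen == iv_dma &&
               iv_dma + ivsize == sg_dma_address(src);
}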
1418
1419 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1420 {
1421         struct aead_request *req = &areq->areq;
1422         struct aead_edesc *edesc;
1423         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1424         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1425         struct device *jrdev = ctx->jrdev;
1426         u32 contig;
1427         u32 *desc;
1428         int ret = 0;
1429
1430         req->cryptlen += ctx->authsize;
1431
1432         /* allocate extended descriptor */
1433         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1434                                      CAAM_CMD_SZ, &contig);
1435
1436         if (IS_ERR(edesc))
1437                 return PTR_ERR(edesc);
1438
1439 #ifdef DEBUG
1440         print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1441                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1442                        req->cryptlen, 1);
1443 #endif
1444
1445         /* Create and submit job descriptor */
1446         init_aead_giv_job(ctx->sh_desc_givenc,
1447                           ctx->sh_desc_givenc_dma, edesc, req, contig);
1448 #ifdef DEBUG
1449         print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1450                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1451                        desc_bytes(edesc->hw_desc), 1);
1452 #endif
1453
1454         desc = edesc->hw_desc;
1455         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1456         if (!ret) {
1457                 ret = -EINPROGRESS;
1458         } else {
1459                 aead_unmap(jrdev, edesc, req);
1460                 kfree(edesc);
1461         }
1462
1463         return ret;
1464 }
1465
1466 /*
1467  * allocate and map the extended descriptor for an ablkcipher request
1468  */
1469 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1470                                                        *req, int desc_bytes,
1471                                                        bool *iv_contig_out)
1472 {
1473         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1474         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1475         struct device *jrdev = ctx->jrdev;
1476         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1477                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1478                        GFP_KERNEL : GFP_ATOMIC;
1479         int src_nents, dst_nents = 0, sec4_sg_bytes;
1480         struct ablkcipher_edesc *edesc;
1481         dma_addr_t iv_dma = 0;
1482         bool iv_contig = false;
1483         int sgc;
1484         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1485         bool src_chained = false, dst_chained = false;
1486         int sec4_sg_index;
1487
1488         src_nents = sg_count(req->src, req->nbytes, &src_chained);
1489
1490         if (req->dst != req->src)
1491                 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1492
1493         if (likely(req->src == req->dst)) {
1494                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1495                                          DMA_BIDIRECTIONAL, src_chained);
1496         } else {
1497                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1498                                          DMA_TO_DEVICE, src_chained);
1499                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1500                                          DMA_FROM_DEVICE, dst_chained);
1501         }
1502
1503         /*
1504          * Check if the IV can be contiguous with the source.  If so,
1505          * include it directly; if not, give it its own sec4 S/G entry.
1506          */
1507         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1508         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1509                 iv_contig = true;
1510         else
1511                 src_nents = src_nents ? : 1;
1512         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1513                         sizeof(struct sec4_sg_entry);
1514
1515         /* allocate space for base edesc and hw desc commands, link tables */
1516         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1517                         sec4_sg_bytes, GFP_DMA | flags);
1518         if (!edesc) {
1519                 dev_err(jrdev, "could not allocate extended descriptor\n");
1520                 return ERR_PTR(-ENOMEM);
1521         }
1522
1523         edesc->src_nents = src_nents;
1524         edesc->src_chained = src_chained;
1525         edesc->dst_nents = dst_nents;
1526         edesc->dst_chained = dst_chained;
1527         edesc->sec4_sg_bytes = sec4_sg_bytes;
1528         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1529                          desc_bytes;
1530
1531         sec4_sg_index = 0;
1532         if (!iv_contig) {
1533                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1534                 sg_to_sec4_sg_last(req->src, src_nents,
1535                                    edesc->sec4_sg + 1, 0);
1536                 sec4_sg_index += 1 + src_nents;
1537         }
1538
1539         if (dst_nents) {
1540                 sg_to_sec4_sg_last(req->dst, dst_nents,
1541                         edesc->sec4_sg + sec4_sg_index, 0);
1542         }
1543
1544         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1545                                             sec4_sg_bytes, DMA_TO_DEVICE);
1546         edesc->iv_dma = iv_dma;
1547
1548 #ifdef DEBUG
1549         print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1550                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1551                        sec4_sg_bytes, 1);
1552 #endif
1553
1554         *iv_contig_out = iv_contig;
1555         return edesc;
1556 }
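
/*
 * Illustrative sketch only, not part of the original driver: the IV
 * contiguity test used in ablkcipher_edesc_alloc() above. The IV can be
 * folded into the same contiguous region as the source when the source
 * is a single segment whose bus address starts right where the IV ends.
 * The helper name is hypothetical.
 */
static inline bool caam_example_iv_contig(dma_addr_t iv_dma, int ivsize,
                                          struct scatterlist *src,
                                          int src_nents)
{
        return !src_nents && iv_dma + ivsize == sg_dma_address(src);
}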
1557
1558 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1559 {
1560         struct ablkcipher_edesc *edesc;
1561         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1562         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1563         struct device *jrdev = ctx->jrdev;
1564         bool iv_contig;
1565         u32 *desc;
1566         int ret = 0;
1567
1568         /* allocate extended descriptor */
1569         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1570                                        CAAM_CMD_SZ, &iv_contig);
1571         if (IS_ERR(edesc))
1572                 return PTR_ERR(edesc);
1573
1574         /* Create and submit job descriptor */
1575         init_ablkcipher_job(ctx->sh_desc_enc,
1576                 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1577 #ifdef DEBUG
1578         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1579                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1580                        desc_bytes(edesc->hw_desc), 1);
1581 #endif
1582         desc = edesc->hw_desc;
1583         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1584
1585         if (!ret) {
1586                 ret = -EINPROGRESS;
1587         } else {
1588                 ablkcipher_unmap(jrdev, edesc, req);
1589                 kfree(edesc);
1590         }
1591
1592         return ret;
1593 }
1594
1595 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1596 {
1597         struct ablkcipher_edesc *edesc;
1598         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1599         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1600         struct device *jrdev = ctx->jrdev;
1601         bool iv_contig;
1602         u32 *desc;
1603         int ret = 0;
1604
1605         /* allocate extended descriptor */
1606         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1607                                        CAAM_CMD_SZ, &iv_contig);
1608         if (IS_ERR(edesc))
1609                 return PTR_ERR(edesc);
1610
1611         /* Create and submit job descriptor */
1612         init_ablkcipher_job(ctx->sh_desc_dec,
1613                 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1614         desc = edesc->hw_desc;
1615 #ifdef DEBUG
1616         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1617                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1618                        desc_bytes(edesc->hw_desc), 1);
1619 #endif
1620
1621         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1622         if (!ret) {
1623                 ret = -EINPROGRESS;
1624         } else {
1625                 ablkcipher_unmap(jrdev, edesc, req);
1626                 kfree(edesc);
1627         }
1628
1629         return ret;
1630 }
1631
1632 #define template_aead           template_u.aead
1633 #define template_ablkcipher     template_u.ablkcipher
1634 struct caam_alg_template {
1635         char name[CRYPTO_MAX_ALG_NAME];
1636         char driver_name[CRYPTO_MAX_ALG_NAME];
1637         unsigned int blocksize;
1638         u32 type;
1639         union {
1640                 struct ablkcipher_alg ablkcipher;
1641                 struct aead_alg aead;
1642                 struct blkcipher_alg blkcipher;
1643                 struct cipher_alg cipher;
1644                 struct compress_alg compress;
1645                 struct rng_alg rng;
1646         } template_u;
1647         u32 class1_alg_type;
1648         u32 class2_alg_type;
1649         u32 alg_op;
1650 };
1651
1652 static struct caam_alg_template driver_algs[] = {
1653         /*
1654          * single-pass ipsec_esp descriptor
1655          * authencesn(*,*) variants are also registered, although they are
1656          * not listed explicitly here.
1657          */
1658         {
1659                 .name = "authenc(hmac(md5),cbc(aes))",
1660                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1661                 .blocksize = AES_BLOCK_SIZE,
1662                 .type = CRYPTO_ALG_TYPE_AEAD,
1663                 .template_aead = {
1664                         .setkey = aead_setkey,
1665                         .setauthsize = aead_setauthsize,
1666                         .encrypt = aead_encrypt,
1667                         .decrypt = aead_decrypt,
1668                         .givencrypt = aead_givencrypt,
1669                         .geniv = "<built-in>",
1670                         .ivsize = AES_BLOCK_SIZE,
1671                         .maxauthsize = MD5_DIGEST_SIZE,
1672                         },
1673                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1674                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1675                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1676         },
1677         {
1678                 .name = "authenc(hmac(sha1),cbc(aes))",
1679                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1680                 .blocksize = AES_BLOCK_SIZE,
1681                 .type = CRYPTO_ALG_TYPE_AEAD,
1682                 .template_aead = {
1683                         .setkey = aead_setkey,
1684                         .setauthsize = aead_setauthsize,
1685                         .encrypt = aead_encrypt,
1686                         .decrypt = aead_decrypt,
1687                         .givencrypt = aead_givencrypt,
1688                         .geniv = "<built-in>",
1689                         .ivsize = AES_BLOCK_SIZE,
1690                         .maxauthsize = SHA1_DIGEST_SIZE,
1691                         },
1692                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1693                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1694                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1695         },
1696         {
1697                 .name = "authenc(hmac(sha224),cbc(aes))",
1698                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1699                 .blocksize = AES_BLOCK_SIZE,
1700                 .type = CRYPTO_ALG_TYPE_AEAD,
1701                 .template_aead = {
1702                         .setkey = aead_setkey,
1703                         .setauthsize = aead_setauthsize,
1704                         .encrypt = aead_encrypt,
1705                         .decrypt = aead_decrypt,
1706                         .givencrypt = aead_givencrypt,
1707                         .geniv = "<built-in>",
1708                         .ivsize = AES_BLOCK_SIZE,
1709                         .maxauthsize = SHA224_DIGEST_SIZE,
1710                         },
1711                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1712                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1713                                    OP_ALG_AAI_HMAC_PRECOMP,
1714                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1715         },
1716         {
1717                 .name = "authenc(hmac(sha256),cbc(aes))",
1718                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1719                 .blocksize = AES_BLOCK_SIZE,
1720                 .type = CRYPTO_ALG_TYPE_AEAD,
1721                 .template_aead = {
1722                         .setkey = aead_setkey,
1723                         .setauthsize = aead_setauthsize,
1724                         .encrypt = aead_encrypt,
1725                         .decrypt = aead_decrypt,
1726                         .givencrypt = aead_givencrypt,
1727                         .geniv = "<built-in>",
1728                         .ivsize = AES_BLOCK_SIZE,
1729                         .maxauthsize = SHA256_DIGEST_SIZE,
1730                         },
1731                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1732                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1733                                    OP_ALG_AAI_HMAC_PRECOMP,
1734                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1735         },
1736         {
1737                 .name = "authenc(hmac(sha384),cbc(aes))",
1738                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1739                 .blocksize = AES_BLOCK_SIZE,
1740                 .type = CRYPTO_ALG_TYPE_AEAD,
1741                 .template_aead = {
1742                         .setkey = aead_setkey,
1743                         .setauthsize = aead_setauthsize,
1744                         .encrypt = aead_encrypt,
1745                         .decrypt = aead_decrypt,
1746                         .givencrypt = aead_givencrypt,
1747                         .geniv = "<built-in>",
1748                         .ivsize = AES_BLOCK_SIZE,
1749                         .maxauthsize = SHA384_DIGEST_SIZE,
1750                         },
1751                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1752                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1753                                    OP_ALG_AAI_HMAC_PRECOMP,
1754                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1755         },
1756
1757         {
1758                 .name = "authenc(hmac(sha512),cbc(aes))",
1759                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1760                 .blocksize = AES_BLOCK_SIZE,
1761                 .type = CRYPTO_ALG_TYPE_AEAD,
1762                 .template_aead = {
1763                         .setkey = aead_setkey,
1764                         .setauthsize = aead_setauthsize,
1765                         .encrypt = aead_encrypt,
1766                         .decrypt = aead_decrypt,
1767                         .givencrypt = aead_givencrypt,
1768                         .geniv = "<built-in>",
1769                         .ivsize = AES_BLOCK_SIZE,
1770                         .maxauthsize = SHA512_DIGEST_SIZE,
1771                         },
1772                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1773                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1774                                    OP_ALG_AAI_HMAC_PRECOMP,
1775                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1776         },
1777         {
1778                 .name = "authenc(hmac(md5),cbc(des3_ede))",
1779                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1780                 .blocksize = DES3_EDE_BLOCK_SIZE,
1781                 .type = CRYPTO_ALG_TYPE_AEAD,
1782                 .template_aead = {
1783                         .setkey = aead_setkey,
1784                         .setauthsize = aead_setauthsize,
1785                         .encrypt = aead_encrypt,
1786                         .decrypt = aead_decrypt,
1787                         .givencrypt = aead_givencrypt,
1788                         .geniv = "<built-in>",
1789                         .ivsize = DES3_EDE_BLOCK_SIZE,
1790                         .maxauthsize = MD5_DIGEST_SIZE,
1791                         },
1792                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1793                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1794                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1795         },
1796         {
1797                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1798                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1799                 .blocksize = DES3_EDE_BLOCK_SIZE,
1800                 .type = CRYPTO_ALG_TYPE_AEAD,
1801                 .template_aead = {
1802                         .setkey = aead_setkey,
1803                         .setauthsize = aead_setauthsize,
1804                         .encrypt = aead_encrypt,
1805                         .decrypt = aead_decrypt,
1806                         .givencrypt = aead_givencrypt,
1807                         .geniv = "<built-in>",
1808                         .ivsize = DES3_EDE_BLOCK_SIZE,
1809                         .maxauthsize = SHA1_DIGEST_SIZE,
1810                         },
1811                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1812                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1813                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1814         },
1815         {
1816                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
1817                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1818                 .blocksize = DES3_EDE_BLOCK_SIZE,
1819                 .type = CRYPTO_ALG_TYPE_AEAD,
1820                 .template_aead = {
1821                         .setkey = aead_setkey,
1822                         .setauthsize = aead_setauthsize,
1823                         .encrypt = aead_encrypt,
1824                         .decrypt = aead_decrypt,
1825                         .givencrypt = aead_givencrypt,
1826                         .geniv = "<built-in>",
1827                         .ivsize = DES3_EDE_BLOCK_SIZE,
1828                         .maxauthsize = SHA224_DIGEST_SIZE,
1829                         },
1830                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1831                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1832                                    OP_ALG_AAI_HMAC_PRECOMP,
1833                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1834         },
1835         {
1836                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1837                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1838                 .blocksize = DES3_EDE_BLOCK_SIZE,
1839                 .type = CRYPTO_ALG_TYPE_AEAD,
1840                 .template_aead = {
1841                         .setkey = aead_setkey,
1842                         .setauthsize = aead_setauthsize,
1843                         .encrypt = aead_encrypt,
1844                         .decrypt = aead_decrypt,
1845                         .givencrypt = aead_givencrypt,
1846                         .geniv = "<built-in>",
1847                         .ivsize = DES3_EDE_BLOCK_SIZE,
1848                         .maxauthsize = SHA256_DIGEST_SIZE,
1849                         },
1850                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1851                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1852                                    OP_ALG_AAI_HMAC_PRECOMP,
1853                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1854         },
1855         {
1856                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
1857                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1858                 .blocksize = DES3_EDE_BLOCK_SIZE,
1859                 .type = CRYPTO_ALG_TYPE_AEAD,
1860                 .template_aead = {
1861                         .setkey = aead_setkey,
1862                         .setauthsize = aead_setauthsize,
1863                         .encrypt = aead_encrypt,
1864                         .decrypt = aead_decrypt,
1865                         .givencrypt = aead_givencrypt,
1866                         .geniv = "<built-in>",
1867                         .ivsize = DES3_EDE_BLOCK_SIZE,
1868                         .maxauthsize = SHA384_DIGEST_SIZE,
1869                         },
1870                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1871                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1872                                    OP_ALG_AAI_HMAC_PRECOMP,
1873                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1874         },
1875         {
1876                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
1877                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1878                 .blocksize = DES3_EDE_BLOCK_SIZE,
1879                 .type = CRYPTO_ALG_TYPE_AEAD,
1880                 .template_aead = {
1881                         .setkey = aead_setkey,
1882                         .setauthsize = aead_setauthsize,
1883                         .encrypt = aead_encrypt,
1884                         .decrypt = aead_decrypt,
1885                         .givencrypt = aead_givencrypt,
1886                         .geniv = "<built-in>",
1887                         .ivsize = DES3_EDE_BLOCK_SIZE,
1888                         .maxauthsize = SHA512_DIGEST_SIZE,
1889                         },
1890                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1891                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1892                                    OP_ALG_AAI_HMAC_PRECOMP,
1893                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1894         },
1895         {
1896                 .name = "authenc(hmac(md5),cbc(des))",
1897                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
1898                 .blocksize = DES_BLOCK_SIZE,
1899                 .type = CRYPTO_ALG_TYPE_AEAD,
1900                 .template_aead = {
1901                         .setkey = aead_setkey,
1902                         .setauthsize = aead_setauthsize,
1903                         .encrypt = aead_encrypt,
1904                         .decrypt = aead_decrypt,
1905                         .givencrypt = aead_givencrypt,
1906                         .geniv = "<built-in>",
1907                         .ivsize = DES_BLOCK_SIZE,
1908                         .maxauthsize = MD5_DIGEST_SIZE,
1909                         },
1910                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1911                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1912                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1913         },
1914         {
1915                 .name = "authenc(hmac(sha1),cbc(des))",
1916                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1917                 .blocksize = DES_BLOCK_SIZE,
1918                 .type = CRYPTO_ALG_TYPE_AEAD,
1919                 .template_aead = {
1920                         .setkey = aead_setkey,
1921                         .setauthsize = aead_setauthsize,
1922                         .encrypt = aead_encrypt,
1923                         .decrypt = aead_decrypt,
1924                         .givencrypt = aead_givencrypt,
1925                         .geniv = "<built-in>",
1926                         .ivsize = DES_BLOCK_SIZE,
1927                         .maxauthsize = SHA1_DIGEST_SIZE,
1928                         },
1929                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1930                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1931                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1932         },
1933         {
1934                 .name = "authenc(hmac(sha224),cbc(des))",
1935                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1936                 .blocksize = DES_BLOCK_SIZE,
1937                 .type = CRYPTO_ALG_TYPE_AEAD,
1938                 .template_aead = {
1939                         .setkey = aead_setkey,
1940                         .setauthsize = aead_setauthsize,
1941                         .encrypt = aead_encrypt,
1942                         .decrypt = aead_decrypt,
1943                         .givencrypt = aead_givencrypt,
1944                         .geniv = "<built-in>",
1945                         .ivsize = DES_BLOCK_SIZE,
1946                         .maxauthsize = SHA224_DIGEST_SIZE,
1947                         },
1948                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1949                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1950                                    OP_ALG_AAI_HMAC_PRECOMP,
1951                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1952         },
1953         {
1954                 .name = "authenc(hmac(sha256),cbc(des))",
1955                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1956                 .blocksize = DES_BLOCK_SIZE,
1957                 .type = CRYPTO_ALG_TYPE_AEAD,
1958                 .template_aead = {
1959                         .setkey = aead_setkey,
1960                         .setauthsize = aead_setauthsize,
1961                         .encrypt = aead_encrypt,
1962                         .decrypt = aead_decrypt,
1963                         .givencrypt = aead_givencrypt,
1964                         .geniv = "<built-in>",
1965                         .ivsize = DES_BLOCK_SIZE,
1966                         .maxauthsize = SHA256_DIGEST_SIZE,
1967                         },
1968                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1969                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1970                                    OP_ALG_AAI_HMAC_PRECOMP,
1971                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1972         },
1973         {
1974                 .name = "authenc(hmac(sha384),cbc(des))",
1975                 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1976                 .blocksize = DES_BLOCK_SIZE,
1977                 .type = CRYPTO_ALG_TYPE_AEAD,
1978                 .template_aead = {
1979                         .setkey = aead_setkey,
1980                         .setauthsize = aead_setauthsize,
1981                         .encrypt = aead_encrypt,
1982                         .decrypt = aead_decrypt,
1983                         .givencrypt = aead_givencrypt,
1984                         .geniv = "<built-in>",
1985                         .ivsize = DES_BLOCK_SIZE,
1986                         .maxauthsize = SHA384_DIGEST_SIZE,
1987                         },
1988                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1989                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1990                                    OP_ALG_AAI_HMAC_PRECOMP,
1991                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1992         },
1993         {
1994                 .name = "authenc(hmac(sha512),cbc(des))",
1995                 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1996                 .blocksize = DES_BLOCK_SIZE,
1997                 .type = CRYPTO_ALG_TYPE_AEAD,
1998                 .template_aead = {
1999                         .setkey = aead_setkey,
2000                         .setauthsize = aead_setauthsize,
2001                         .encrypt = aead_encrypt,
2002                         .decrypt = aead_decrypt,
2003                         .givencrypt = aead_givencrypt,
2004                         .geniv = "<built-in>",
2005                         .ivsize = DES_BLOCK_SIZE,
2006                         .maxauthsize = SHA512_DIGEST_SIZE,
2007                         },
2008                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2009                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2010                                    OP_ALG_AAI_HMAC_PRECOMP,
2011                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2012         },
2013         /* ablkcipher descriptor */
2014         {
2015                 .name = "cbc(aes)",
2016                 .driver_name = "cbc-aes-caam",
2017                 .blocksize = AES_BLOCK_SIZE,
2018                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2019                 .template_ablkcipher = {
2020                         .setkey = ablkcipher_setkey,
2021                         .encrypt = ablkcipher_encrypt,
2022                         .decrypt = ablkcipher_decrypt,
2023                         .geniv = "eseqiv",
2024                         .min_keysize = AES_MIN_KEY_SIZE,
2025                         .max_keysize = AES_MAX_KEY_SIZE,
2026                         .ivsize = AES_BLOCK_SIZE,
2027                         },
2028                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2029         },
2030         {
2031                 .name = "cbc(des3_ede)",
2032                 .driver_name = "cbc-3des-caam",
2033                 .blocksize = DES3_EDE_BLOCK_SIZE,
2034                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2035                 .template_ablkcipher = {
2036                         .setkey = ablkcipher_setkey,
2037                         .encrypt = ablkcipher_encrypt,
2038                         .decrypt = ablkcipher_decrypt,
2039                         .geniv = "eseqiv",
2040                         .min_keysize = DES3_EDE_KEY_SIZE,
2041                         .max_keysize = DES3_EDE_KEY_SIZE,
2042                         .ivsize = DES3_EDE_BLOCK_SIZE,
2043                         },
2044                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2045         },
2046         {
2047                 .name = "cbc(des)",
2048                 .driver_name = "cbc-des-caam",
2049                 .blocksize = DES_BLOCK_SIZE,
2050                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2051                 .template_ablkcipher = {
2052                         .setkey = ablkcipher_setkey,
2053                         .encrypt = ablkcipher_encrypt,
2054                         .decrypt = ablkcipher_decrypt,
2055                         .geniv = "eseqiv",
2056                         .min_keysize = DES_KEY_SIZE,
2057                         .max_keysize = DES_KEY_SIZE,
2058                         .ivsize = DES_BLOCK_SIZE,
2059                         },
2060                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2061         }
2062 };
2063
2064 struct caam_crypto_alg {
2065         struct list_head entry;
2066         struct device *ctrldev;
2067         int class1_alg_type;
2068         int class2_alg_type;
2069         int alg_op;
2070         struct crypto_alg crypto_alg;
2071 };
2072
2073 static int caam_cra_init(struct crypto_tfm *tfm)
2074 {
2075         struct crypto_alg *alg = tfm->__crt_alg;
2076         struct caam_crypto_alg *caam_alg =
2077                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2078         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2079         struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2080         int tgt_jr = atomic_inc_return(&priv->tfm_count);
2081
2082         /*
2083          * distribute tfms across job rings to ensure in-order
2084          * crypto request processing per tfm
2085          */
2086         ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
2087
2088         /* copy descriptor header template value */
2089         ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2090         ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2091         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2092
2093         return 0;
2094 }
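
/*
 * Illustrative sketch only, not part of the original driver: the job
 * ring selection done in caam_cra_init() above. With the
 * (tgt_jr / 2) % total_jobrs mapping, consecutive tfms land on rings
 * 0, 0, 1, 1, 2, 2, ... and wrap around, so each tfm sticks to one ring
 * and its requests complete in submission order. The helper name is
 * hypothetical.
 */
static inline int caam_example_ring_for_tfm(int tfm_index, int total_jobrs)
{
        return (tfm_index / 2) % total_jobrs;
}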
2095
2096 static void caam_cra_exit(struct crypto_tfm *tfm)
2097 {
2098         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2099
2100         if (ctx->sh_desc_enc_dma &&
2101             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2102                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2103                                  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2104         if (ctx->sh_desc_dec_dma &&
2105             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2106                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2107                                  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2108         if (ctx->sh_desc_givenc_dma &&
2109             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2110                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2111                                  desc_bytes(ctx->sh_desc_givenc),
2112                                  DMA_TO_DEVICE);
2113 }
2114
2115 static void __exit caam_algapi_exit(void)
2116 {
2118         struct device_node *dev_node;
2119         struct platform_device *pdev;
2120         struct device *ctrldev;
2121         struct caam_drv_private *priv;
2122         struct caam_crypto_alg *t_alg, *n;
2123
2124         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2125         if (!dev_node) {
2126                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2127                 if (!dev_node)
2128                         return;
2129         }
2130
2131         pdev = of_find_device_by_node(dev_node);
2132         if (!pdev)
2133                 return;
2134
2135         ctrldev = &pdev->dev;
2136         of_node_put(dev_node);
2137         priv = dev_get_drvdata(ctrldev);
2138
2139         if (!priv->alg_list.next)
2140                 return;
2141
2142         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2143                 crypto_unregister_alg(&t_alg->crypto_alg);
2144                 list_del(&t_alg->entry);
2145                 kfree(t_alg);
2146         }
2147 }
2148
2149 static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2150                                               struct caam_alg_template
2151                                               *template)
2152 {
2153         struct caam_crypto_alg *t_alg;
2154         struct crypto_alg *alg;
2155
2156         t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2157         if (!t_alg) {
2158                 dev_err(ctrldev, "failed to allocate t_alg\n");
2159                 return ERR_PTR(-ENOMEM);
2160         }
2161
2162         alg = &t_alg->crypto_alg;
2163
2164         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2165         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2166                  template->driver_name);
2167         alg->cra_module = THIS_MODULE;
2168         alg->cra_init = caam_cra_init;
2169         alg->cra_exit = caam_cra_exit;
2170         alg->cra_priority = CAAM_CRA_PRIORITY;
2171         alg->cra_blocksize = template->blocksize;
2172         alg->cra_alignmask = 0;
2173         alg->cra_ctxsize = sizeof(struct caam_ctx);
2174         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2175                          template->type;
2176         switch (template->type) {
2177         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2178                 alg->cra_type = &crypto_ablkcipher_type;
2179                 alg->cra_ablkcipher = template->template_ablkcipher;
2180                 break;
2181         case CRYPTO_ALG_TYPE_AEAD:
2182                 alg->cra_type = &crypto_aead_type;
2183                 alg->cra_aead = template->template_aead;
2184                 break;
2185         }
2186
2187         t_alg->class1_alg_type = template->class1_alg_type;
2188         t_alg->class2_alg_type = template->class2_alg_type;
2189         t_alg->alg_op = template->alg_op;
2190         t_alg->ctrldev = ctrldev;
2191
2192         return t_alg;
2193 }
2194
2195 static int __init caam_algapi_init(void)
2196 {
2197         struct device_node *dev_node;
2198         struct platform_device *pdev;
2199         struct device *ctrldev;
2200         struct caam_drv_private *priv;
2201         int i = 0, err = 0;
2202
2203         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2204         if (!dev_node) {
2205                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2206                 if (!dev_node)
2207                         return -ENODEV;
2208         }
2209
2210         pdev = of_find_device_by_node(dev_node);
2211         if (!pdev)
2212                 return -ENODEV;
2213
2214         ctrldev = &pdev->dev;
2215         priv = dev_get_drvdata(ctrldev);
2217
2218         INIT_LIST_HEAD(&priv->alg_list);
2219
2220         atomic_set(&priv->tfm_count, -1);
2221
2222         /* register crypto algorithms the device supports */
2223         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2224                 /* TODO: check if h/w supports alg */
2225                 struct caam_crypto_alg *t_alg;
2226                 bool done = false;
2227
2228 authencesn:
2229                 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2230                 if (IS_ERR(t_alg)) {
2231                         err = PTR_ERR(t_alg);
2232                         dev_warn(ctrldev, "%s alg allocation failed\n",
2233                                  driver_algs[i].driver_name);
2234                         continue;
2235                 }
2236
2237                 err = crypto_register_alg(&t_alg->crypto_alg);
2238                 if (err) {
2239                         dev_warn(ctrldev, "%s alg registration failed\n",
2240                                 t_alg->crypto_alg.cra_driver_name);
2241                         kfree(t_alg);
2242                 } else {
2243                         list_add_tail(&t_alg->entry, &priv->alg_list);
2244                         if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
2245                             !memcmp(driver_algs[i].name, "authenc", 7) &&
2246                             !done) {
2247                                 char *name;
2248
2249                                 name = driver_algs[i].name;
2250                                 memmove(name + 10, name + 7, strlen(name) - 7);
2251                                 memcpy(name + 7, "esn", 3);
2252
2253                                 name = driver_algs[i].driver_name;
2254                                 memmove(name + 10, name + 7, strlen(name) - 7);
2255                                 memcpy(name + 7, "esn", 3);
2256
2257                                 done = true;
2258                                 goto authencesn;
2259                         }
2260                 }
2261         }
2262         if (!list_empty(&priv->alg_list))
2263                 dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
2264                          (char *)of_get_property(dev_node, "compatible", NULL));
2265         of_node_put(dev_node);
2266         return err;
2267 }
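
/*
 * Illustrative sketch only, not part of the original driver: the
 * in-place rename done in caam_algapi_init() above, which turns a
 * template name such as "authenc(hmac(sha1),cbc(aes))" into
 * "authencesn(hmac(sha1),cbc(aes))" by shifting the tail three bytes to
 * the right and splicing in "esn". It assumes the CRYPTO_MAX_ALG_NAME
 * buffer has room for the three extra characters. The helper name is
 * hypothetical.
 */
static inline void caam_example_make_esn_name(char *name)
{
        memmove(name + 10, name + 7, strlen(name) - 7 + 1); /* move NUL too */
        memcpy(name + 7, "esn", 3);
}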
2268
2269 module_init(caam_algapi_init);
2270 module_exit(caam_algapi_exit);
2271
2272 MODULE_LICENSE("GPL");
2273 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2274 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");