/*
 * Cryptographic API.
 *
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/sha.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK		(sizeof(u32) - 1)
#define SHA_QUEUE_SIZE		512
#define SHA_TMP_BUF_SIZE	512
#define SHA_BUF_SIZE		((u32)PAGE_SIZE)

#define SHA_OP_UPDATE		1
#define SHA_OP_FINAL		2

#define SHA_DATA_LEN_MSK	cpu_to_le32(GENMASK(16, 0))

/* SHA command token */
#define SHA_CT_SIZE		5
#define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
#define SHA_CMD0		cpu_to_le32(0x03020000)
#define SHA_CMD1		cpu_to_le32(0x21060000)
#define SHA_CMD2		cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
#define SHA_TFM_INNER_DIG	cpu_to_le32(0x1 << 21)
#define SHA_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define SHA_TFM_START		cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE	cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE	cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1		cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256		cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224		cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512		cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384		cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)	cpu_to_le32(((x) & GENMASK(3, 0)) << 24)

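/*
 * For instance (illustrative): mtk_sha_info_init() below builds the
 * first transform control word for a plain SHA-256 request as
 * SHA_TFM_HASH | SHA_TFM_INNER_DIG | SHA_TFM_SIZE(8) | SHA_TFM_SHA256,
 * where 8 is the digest size counted in 32-bit words.
 */
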
/* SHA flags */
#define SHA_FLAGS_BUSY		BIT(0)
#define SHA_FLAGS_FINAL		BIT(1)
#define SHA_FLAGS_FINUP		BIT(2)
#define SHA_FLAGS_SG		BIT(3)
#define SHA_FLAGS_ALGO_MSK	GENMASK(8, 4)
#define SHA_FLAGS_SHA1		BIT(4)
#define SHA_FLAGS_SHA224	BIT(5)
#define SHA_FLAGS_SHA256	BIT(6)
#define SHA_FLAGS_SHA384	BIT(7)
#define SHA_FLAGS_SHA512	BIT(8)
#define SHA_FLAGS_HMAC		BIT(9)
#define SHA_FLAGS_PAD		BIT(10)

/**
 * mtk_sha_ct is a set of hardware instructions (command tokens)
 * that control the engine's SHA processing flow; it also carries
 * the first two words of the transform state.
 */
struct mtk_sha_ct {
	__le32 ctrl[2];
	__le32 cmd[3];
};

/**
 * mtk_sha_tfm defines the SHA transform state and stores the result
 * digest produced by the engine.
 */
struct mtk_sha_tfm {
	__le32 ctrl[2];
	__le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)];
};

/**
 * mtk_sha_info consists of the command token and the SHA transform
 * state; its role is similar to that of mtk_aes_info.
 */
struct mtk_sha_info {
	struct mtk_sha_ct ct;
	struct mtk_sha_tfm tfm;
};

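/*
 * Note: the whole mtk_sha_info is DMA-mapped as one contiguous block;
 * mtk_sha_info_map() records the command token address in ct_dma and
 * derives tfm_dma as ct_dma + sizeof(struct mtk_sha_ct).
 */
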
struct mtk_sha_reqctx {
	struct mtk_sha_info info;
	unsigned long flags;
	unsigned long op;

	u64 digcnt;
	bool start;
	size_t bufcnt;
	dma_addr_t dma_addr;

	__le32 ct_hdr;
	u32 ct_size;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	/* Walk state */
	struct scatterlist *sg;
	u32 offset;	/* Offset in current sg */
	u32 total;	/* Total request */
	size_t ds;
	size_t bs;

	u8 *buffer;
};

struct mtk_sha_hmac_ctx {
	struct crypto_shash *shash;
	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct mtk_sha_ctx {
	struct mtk_cryp *cryp;
	unsigned long flags;
	u8 id;
	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

	struct mtk_sha_hmac_ctx base[0];
};

struct mtk_sha_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_sha_drv mtk_sha = {
	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req);

static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_sha.lock);
	if (!tctx->cryp) {
		list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
			cryp = tmp;
			break;
		}
		tctx->cryp = cryp;
	} else {
		cryp = tctx->cryp;
	}

	/*
	 * Assign a record id to the tfm in round-robin fashion; this
	 * binds the tfm to the corresponding descriptor ring.
	 */
	tctx->id = cryp->rec;
	cryp->rec = !cryp->rec;

	spin_unlock_bh(&mtk_sha.lock);

	return cryp;
}

static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list; a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) containing the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
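/*
 * A worked example (illustrative): after 100 message bytes with
 * SHA-256, index = 100 & 0x3f = 36, which is < 56, so
 * padlen = 56 - 36 = 20. The buffer grows by padlen + 8 = 28 bytes:
 * 0x80, nineteen zero bytes, then the 64-bit bit-length 800
 * (100 << 3), for 128 bytes in total, i.e. exactly two 512-bit blocks.
 */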
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
	u32 index, padlen;
	u64 bits[2];
	u64 size = ctx->digcnt;

	size += ctx->bufcnt;
	size += len;

	bits[1] = cpu_to_be64(size << 3);
	bits[0] = cpu_to_be64(size >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}

/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
	struct mtk_sha_ct *ct = &ctx->info.ct;
	struct mtk_sha_tfm *tfm = &ctx->info.tfm;

	ctx->ct_hdr = SHA_CT_CTRL_HDR;
	ctx->ct_size = SHA_CT_SIZE;

	tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG |
		       SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA1:
		tfm->ctrl[0] |= SHA_TFM_SHA1;
		break;
	case SHA_FLAGS_SHA224:
		tfm->ctrl[0] |= SHA_TFM_SHA224;
		break;
	case SHA_FLAGS_SHA256:
		tfm->ctrl[0] |= SHA_TFM_SHA256;
		break;
	case SHA_FLAGS_SHA384:
		tfm->ctrl[0] |= SHA_TFM_SHA384;
		break;
	case SHA_FLAGS_SHA512:
		tfm->ctrl[0] |= SHA_TFM_SHA512;
		break;

	default:
		/* Should not happen... */
		return;
	}

	tfm->ctrl[1] = SHA_TFM_HASH_STORE;
	ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
	ct->ctrl[1] = tfm->ctrl[1];

	ct->cmd[0] = SHA_CMD0;
	ct->cmd[1] = SHA_CMD1;
	ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}

/*
 * Update the input data length field of the transform information
 * and map it to the DMA region.
 */
static int mtk_sha_info_map(struct mtk_cryp *cryp,
			    struct mtk_sha_rec *sha,
			    size_t len)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_sha_info *info = &ctx->info;
	struct mtk_sha_ct *ct = &info->ct;

	if (ctx->start)
		ctx->start = false;
	else
		ct->ctrl[0] &= ~SHA_TFM_START;

	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
	ctx->ct_hdr |= cpu_to_le32(len);
	ct->cmd[0] &= ~SHA_DATA_LEN_MSK;
	ct->cmd[0] |= cpu_to_le32(len);

	ctx->digcnt += len;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
		return -EINVAL;
	}
	ctx->tfm_dma = ctx->ct_dma + sizeof(*ct);

	return 0;
}

/*
 * Because of a hardware limitation, the inner and outer digests would
 * have to be pre-calculated and processed by the engine first, with the
 * result then applied to the input message. This complex hashing
 * procedure limits HMAC performance, so we fall back to software for
 * the final HMAC step.
 */
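/*
 * For reference, the standard HMAC construction (RFC 2104) computed
 * here is:
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * The engine has already produced the inner hash H((K' ^ ipad) || m)
 * into req->result; the shash fallback below computes the outer hash
 * over opad and that inner digest.
 */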
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}

/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags = 0;
	ctx->ds = crypto_ahash_digestsize(tfm);

	switch (ctx->ds) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->bs = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buffer = tctx->buf;
	ctx->start = true;

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, ctx->bs);
		ctx->bufcnt = ctx->bs;
		ctx->flags |= SHA_FLAGS_HMAC;
	}

	return 0;
}

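/*
 * Submit a single command/result descriptor pair to the ring. The
 * command descriptor points at the input data and at the DMA-mapped
 * command token and transform state; the result descriptor drains the
 * engine output into the shared cryp->tmp bounce buffer, since the
 * digest itself is read back from the transform state instead.
 */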
static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr, size_t len)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
	struct mtk_desc *res = ring->res_base + ring->res_pos;
	int err;

	err = mtk_sha_info_map(cryp, sha, len);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	res->hdr = MTK_DESC_FIRST |
		   MTK_DESC_LAST |
		   MTK_DESC_BUF_LEN(len);

	res->buf = cpu_to_le32(cryp->tmp_dma);

	cmd->hdr = MTK_DESC_FIRST |
		   MTK_DESC_LAST |
		   MTK_DESC_BUF_LEN(len) |
		   MTK_DESC_CT_LEN(ctx->ct_size);

	cmd->buf = cpu_to_le32(addr);
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	if (++ring->cmd_pos == MTK_DESC_NUM)
		ring->cmd_pos = 0;

	ring->res_pos = ring->cmd_pos;
	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));

	return -EINPROGRESS;
}

static int mtk_sha_xmit2(struct mtk_cryp *cryp,
			 struct mtk_sha_rec *sha,
			 struct mtk_sha_reqctx *ctx,
			 size_t len1, size_t len2)
{
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
	struct mtk_desc *res = ring->res_base + ring->res_pos;
	int err;

	err = mtk_sha_info_map(cryp, sha, len1 + len2);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
	res->buf = cpu_to_le32(cryp->tmp_dma);

	cmd->hdr = MTK_DESC_BUF_LEN(len1) |
		   MTK_DESC_FIRST |
		   MTK_DESC_CT_LEN(ctx->ct_size);
	cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	if (++ring->cmd_pos == MTK_DESC_NUM)
		ring->cmd_pos = 0;

	ring->res_pos = ring->cmd_pos;

	cmd = ring->cmd_base + ring->cmd_pos;
	res = ring->res_base + ring->res_pos;

	res->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
	res->buf = cpu_to_le32(cryp->tmp_dma);

	cmd->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
	cmd->buf = cpu_to_le32(ctx->dma_addr);

	if (++ring->cmd_pos == MTK_DESC_NUM)
		ring->cmd_pos = 0;

	ring->res_pos = ring->cmd_pos;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));

	return -EINPROGRESS;
}

static int mtk_sha_dma_map(struct mtk_cryp *cryp,
			   struct mtk_sha_rec *sha,
			   struct mtk_sha_reqctx *ctx,
			   size_t count)
{
	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
				       SHA_BUF_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
		dev_err(cryp->dev, "dma map error\n");
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
}

static int mtk_sha_update_slow(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;
	u32 final;

	mtk_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

	if (final) {
		sha->flags |= SHA_FLAGS_FINAL;
		mtk_sha_fill_padding(ctx, 0);
	}

	if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		return mtk_sha_dma_map(cryp, sha, ctx, count);
	}
	return 0;
}

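/*
 * Fast path: DMA straight from the scatterlist when the current entry
 * is word-aligned and (unless it is the last one) a multiple of the
 * block size; otherwise fall back to mtk_sha_update_slow(), which
 * copies the data through the internal bounce buffer first.
 */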
static int mtk_sha_update_start(struct mtk_cryp *cryp,
				struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	u32 len, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return mtk_sha_update_slow(cryp, sha);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return mtk_sha_update_slow(cryp, sha);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
		/* size is not ctx->bs aligned */
		return mtk_sha_update_slow(cryp, sha);

	len = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->bs aligned */
			tail = len & (ctx->bs - 1);
			len -= tail;
		}
	}

	ctx->total -= len;
	ctx->offset = len; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		size_t count;

		tail = len & (ctx->bs - 1);
		len -= tail;
		ctx->total += tail;
		ctx->offset = len; /* offset where to start slow */

		sg = ctx->sg;
		mtk_sha_append_sg(ctx);
		mtk_sha_fill_padding(ctx, len);

		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
					       SHA_BUF_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
			dev_err(cryp->dev, "dma map bytes error\n");
			return -EINVAL;
		}

		sha->flags |= SHA_FLAGS_FINAL;
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		if (len == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
				dev_err(cryp->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;
			return mtk_sha_xmit2(cryp, sha, ctx, len, count);
		}
	}

	if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(cryp->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len);
}

static int mtk_sha_final_req(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	struct ahash_request *req = sha->req;
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t count;

	mtk_sha_fill_padding(ctx, 0);

	sha->flags |= SHA_FLAGS_FINAL;
	count = ctx->bufcnt;
	ctx->bufcnt = 0;

	return mtk_sha_dma_map(cryp, sha, ctx, count);
}

/* Copy ready hash (+ finalize hmac) */
static int mtk_sha_finish(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *digest = ctx->info.tfm.digest;
	u32 *result = (u32 *)req->result;
	int i;

	/* Get the hash from the digest buffer */
	for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
		result[i] = le32_to_cpu(digest[i]);

	if (ctx->flags & SHA_FLAGS_HMAC)
		return mtk_sha_finish_hmac(req);

	return 0;
}

static void mtk_sha_finish_req(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha, int err)
{
	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
		err = mtk_sha_finish(sha->req);

	sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

	sha->req->base.complete(&sha->req->base, err);

	/* Handle new request */
	mtk_sha_handle_queue(cryp, sha->id - RING2, NULL);
}

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req)
{
	struct mtk_sha_rec *sha = cryp->sha[id];
	struct crypto_async_request *async_req, *backlog;
	struct mtk_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&sha->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&sha->queue, req);

	if (SHA_FLAGS_BUSY & sha->flags) {
		spin_unlock_irqrestore(&sha->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&sha->queue);
	async_req = crypto_dequeue_request(&sha->queue);
	if (async_req)
		sha->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&sha->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	ctx = ahash_request_ctx(req);

	sha->req = req;

	mtk_sha_info_init(ctx);

	if (ctx->op == SHA_OP_UPDATE) {
		err = mtk_sha_update_start(cryp, sha);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* No final() after finup() */
			err = mtk_sha_final_req(cryp, sha);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = mtk_sha_final_req(cryp, sha);
	}

	if (unlikely(err != -EINPROGRESS))
		/* Task will not finish it, so do it here */
		mtk_sha_finish_req(cryp, sha, err);

	return ret;
}

static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op = op;

	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}

static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_BIDIRECTIONAL);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(cryp->dev, ctx->dma_addr,
					 SHA_BUF_SIZE, DMA_TO_DEVICE);
		}
	} else
		dma_unmap_single(cryp->dev, ctx->dma_addr,
				 SHA_BUF_SIZE, DMA_TO_DEVICE);
}

static void mtk_sha_complete(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	int err = 0;

	err = mtk_sha_update_start(cryp, sha);
	if (err != -EINPROGRESS)
		mtk_sha_finish_req(cryp, sha, err);
}

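/*
 * Buffer sub-threshold updates: as long as the buffered data plus the
 * new bytes still fit in SHA_BUF_SIZE and this is not a finup, just
 * append to the internal buffer and return without touching hardware.
 */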
static int mtk_sha_update(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
	    !(ctx->flags & SHA_FLAGS_FINUP))
		return mtk_sha_append_sg(ctx);

	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}

static int mtk_sha_final(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_PAD)
		return mtk_sha_finish(req);

	return mtk_sha_enqueue(req, SHA_OP_FINAL);
}

static int mtk_sha_finup(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = mtk_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() always has to be called to clean up resources
	 * even if update() failed.
	 */
	err2 = mtk_sha_final(req);

	return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
	return mtk_sha_init(req) ?: mtk_sha_finup(req);
}

static int mtk_sha_setkey(struct crypto_ahash *tfm,
			  const unsigned char *key, u32 keylen)
{
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	size_t bs = crypto_shash_blocksize(bctx->shash);
	size_t ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = crypto_shash_get_flags(bctx->shash) &
		       CRYPTO_TFM_REQ_MAY_SLEEP;

	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

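	/*
	 * Derive the HMAC pads (RFC 2104): XOR the padded key with 0x36
	 * repeated for the inner pad and with 0x5c repeated for the
	 * outer pad.
	 */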
	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return 0;
}

static int mtk_sha_export(struct ahash_request *req, void *out)
{
	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
				const char *alg_base)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_sha_find_dev(tctx);
	if (!cryp)
		return -ENODEV;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mtk_sha_reqctx));

	if (alg_base) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		tctx->flags |= SHA_FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("base driver %s could not be loaded.\n",
			       alg_base);

			return PTR_ERR(bctx->shash);
		}
	}
	return 0;
}

static int mtk_sha_cra_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, NULL);
}

static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha1");
}

static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha224");
}

static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha256");
}

static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha384");
}

static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha512");
}

static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "mtk-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha224",
		.cra_driver_name = "mtk-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "mtk-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha1)",
		.cra_driver_name = "mtk-hmac-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha1_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha224)",
		.cra_driver_name = "mtk-hmac-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha224_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha256)",
		.cra_driver_name = "mtk-hmac-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha256_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha384",
		.cra_driver_name = "mtk-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha512",
		.cra_driver_name = "mtk-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha384)",
		.cra_driver_name = "mtk-hmac-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha384_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha512)",
		.cra_driver_name = "mtk-hmac-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha512_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};

static void mtk_sha_task0(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_sha_rec *sha = cryp->sha[0];

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}

static void mtk_sha_task1(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_sha_rec *sha = cryp->sha[1];

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}

static irqreturn_t mtk_sha_ring2_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_sha_rec *sha = cryp->sha[0];
	u32 val = mtk_sha_read(cryp, RDR_STAT(RING2));

	mtk_sha_write(cryp, RDR_STAT(RING2), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(RING2), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(RING2),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

static irqreturn_t mtk_sha_ring3_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_sha_rec *sha = cryp->sha[1];
	u32 val = mtk_sha_read(cryp, RDR_STAT(RING3));

	mtk_sha_write(cryp, RDR_STAT(RING3), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(RING3), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(RING3),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * Two SHA records are used to get extra performance. This is similar
 * to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
	struct mtk_sha_rec **sha = cryp->sha;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
		if (!sha[i])
			goto err_cleanup;

		sha[i]->id = i + RING2;

		spin_lock_init(&sha[i]->lock);
		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
	}

	tasklet_init(&sha[0]->task, mtk_sha_task0, (unsigned long)cryp);
	tasklet_init(&sha[1]->task, mtk_sha_task1, (unsigned long)cryp);

	cryp->rec = 1;

	return 0;

err_cleanup:
	for (; i--; )
		kfree(sha[i]);
	return err;
}

static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->sha[i]->task);
		kfree(cryp->sha[i]);
	}
}

static void mtk_sha_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
}

static int mtk_sha_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
		err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
		if (err)
			goto err_sha_224_256_algs;
	}

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
		err = crypto_register_ahash(&algs_sha384_sha512[i]);
		if (err)
			goto err_sha_384_512_algs;
	}

	return 0;

err_sha_384_512_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
	i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	return err;
}

int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
	int err;

	INIT_LIST_HEAD(&cryp->sha_list);

	/* Initialize two hash records */
	err = mtk_sha_record_init(cryp);
	if (err)
		goto err_record;

	/* Ring2 is used by SHA record0 */
	err = devm_request_irq(cryp->dev, cryp->irq[RING2],
			       mtk_sha_ring2_irq, IRQF_TRIGGER_LOW,
			       "mtk-sha", cryp);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq0.\n");
		goto err_res;
	}

	/* Ring3 is used by SHA record1 */
	err = devm_request_irq(cryp->dev, cryp->irq[RING3],
			       mtk_sha_ring3_irq, IRQF_TRIGGER_LOW,
			       "mtk-sha", cryp);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq1.\n");
		goto err_res;
	}

	/* Enable ring2 and ring3 interrupt for hash */
	mtk_sha_write(cryp, AIC_ENABLE_SET(RING2), MTK_IRQ_RDR2);
	mtk_sha_write(cryp, AIC_ENABLE_SET(RING3), MTK_IRQ_RDR3);

	cryp->tmp = dma_alloc_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
				       &cryp->tmp_dma, GFP_KERNEL);
	if (!cryp->tmp) {
		dev_err(cryp->dev, "unable to allocate tmp buffer.\n");
		err = -EINVAL;
		goto err_res;
	}

	spin_lock(&mtk_sha.lock);
	list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
	spin_unlock(&mtk_sha.lock);

	err = mtk_sha_register_algs();
	if (err)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);
	dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
			  cryp->tmp, cryp->tmp_dma);
err_res:
	mtk_sha_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-sha initialization failed.\n");
	return err;
}

void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);

	mtk_sha_unregister_algs();
	dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
			  cryp->tmp, cryp->tmp_dma);
	mtk_sha_record_free(cryp);
}