blob: 6878160234b5d0b4d6a78ed282878c71aa30625a [file] [log] [blame]
Tom Lendacky63b94502013-11-12 11:46:16 -06001/*
2 * AMD Cryptographic Coprocessor (CCP) driver
3 *
Gary R Hookea0375a2016-03-01 13:49:25 -06004 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
Tom Lendacky63b94502013-11-12 11:46:16 -06005 *
6 * Author: Tom Lendacky <thomas.lendacky@amd.com>
Gary R Hooka43eb982016-07-26 19:09:31 -05007 * Author: Gary R Hook <gary.hook@amd.com>
Tom Lendacky63b94502013-11-12 11:46:16 -06008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/pci.h>
Tom Lendacky63b94502013-11-12 11:46:16 -060017#include <linux/interrupt.h>
Tom Lendacky63b94502013-11-12 11:46:16 -060018#include <crypto/scatterwalk.h>
Gary R Hookea0375a2016-03-01 13:49:25 -060019#include <linux/ccp.h>
Tom Lendacky63b94502013-11-12 11:46:16 -060020
21#include "ccp-dev.h"
22
/* SHA initial context values
 *
 * The standard SHA-1/224/256 initial hash values (H0..Hn), stored as
 * big-endian 32-bit words so they can be loaded directly as an initial
 * context.  Note that ccp_sha224_init is deliberately sized by
 * SHA256_DIGEST_SIZE: SHA-224 carries the full 256-bit internal state
 * even though only 224 bits are emitted as the digest.
 */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

/* SHA-224 uses the SHA-256 state width with different initial values */
static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
43
/* Obtain a job id for a new command: generated on version 3.0 devices,
 * 0 otherwise.  The macro parameter is parenthesized at each expansion
 * so that an expression argument (e.g. a pointer cast) expands safely.
 */
#define	CCP_NEW_JOBID(ccp)	(((ccp)->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)
46
Tom Lendacky63b94502013-11-12 11:46:16 -060047static u32 ccp_gen_jobid(struct ccp_device *ccp)
48{
49 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
50}
51
52static void ccp_sg_free(struct ccp_sg_workarea *wa)
53{
54 if (wa->dma_count)
55 dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
56
57 wa->dma_count = 0;
58}
59
60static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
Tom Lendacky81a59f02014-01-06 13:34:17 -060061 struct scatterlist *sg, u64 len,
Tom Lendacky63b94502013-11-12 11:46:16 -060062 enum dma_data_direction dma_dir)
63{
64 memset(wa, 0, sizeof(*wa));
65
66 wa->sg = sg;
67 if (!sg)
68 return 0;
69
Tom Lendackyfb43f692015-06-01 11:15:53 -050070 wa->nents = sg_nents_for_len(sg, len);
71 if (wa->nents < 0)
72 return wa->nents;
73
Tom Lendacky63b94502013-11-12 11:46:16 -060074 wa->bytes_left = len;
75 wa->sg_used = 0;
76
77 if (len == 0)
78 return 0;
79
80 if (dma_dir == DMA_NONE)
81 return 0;
82
83 wa->dma_sg = sg;
84 wa->dma_dev = dev;
85 wa->dma_dir = dma_dir;
86 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
87 if (!wa->dma_count)
88 return -ENOMEM;
89
Tom Lendacky63b94502013-11-12 11:46:16 -060090 return 0;
91}
92
93static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
94{
Tom Lendacky81a59f02014-01-06 13:34:17 -060095 unsigned int nbytes = min_t(u64, len, wa->bytes_left);
Tom Lendacky63b94502013-11-12 11:46:16 -060096
97 if (!wa->sg)
98 return;
99
100 wa->sg_used += nbytes;
101 wa->bytes_left -= nbytes;
102 if (wa->sg_used == wa->sg->length) {
103 wa->sg = sg_next(wa->sg);
104 wa->sg_used = 0;
105 }
106}
107
108static void ccp_dm_free(struct ccp_dm_workarea *wa)
109{
110 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
111 if (wa->address)
112 dma_pool_free(wa->dma_pool, wa->address,
113 wa->dma.address);
114 } else {
115 if (wa->dma.address)
116 dma_unmap_single(wa->dev, wa->dma.address, wa->length,
117 wa->dma.dir);
118 kfree(wa->address);
119 }
120
121 wa->address = NULL;
122 wa->dma.address = 0;
123}
124
125static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
126 struct ccp_cmd_queue *cmd_q,
127 unsigned int len,
128 enum dma_data_direction dir)
129{
130 memset(wa, 0, sizeof(*wa));
131
132 if (!len)
133 return 0;
134
135 wa->dev = cmd_q->ccp->dev;
136 wa->length = len;
137
138 if (len <= CCP_DMAPOOL_MAX_SIZE) {
139 wa->dma_pool = cmd_q->dma_pool;
140
141 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
142 &wa->dma.address);
143 if (!wa->address)
144 return -ENOMEM;
145
146 wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
147
148 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
149 } else {
150 wa->address = kzalloc(len, GFP_KERNEL);
151 if (!wa->address)
152 return -ENOMEM;
153
154 wa->dma.address = dma_map_single(wa->dev, wa->address, len,
155 dir);
156 if (!wa->dma.address)
157 return -ENOMEM;
158
159 wa->dma.length = len;
160 }
161 wa->dma.dir = dir;
162
163 return 0;
164}
165
/* Copy @len bytes from the scatterlist (starting at @sg_offset) into
 * the workarea buffer at @wa_offset.  The workarea must already be
 * initialized (non-NULL address).
 */
static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	/* final argument 0 = read from the scatterlist into the buffer */
	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}
175
/* Copy @len bytes from the workarea buffer at @wa_offset back out to
 * the scatterlist (starting at @sg_offset).  The workarea must already
 * be initialized (non-NULL address).
 */
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	/* final argument 1 = write from the buffer out to the scatterlist */
	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
185
/* Copy @len bytes from the scatterlist into the workarea while
 * byte-reversing each @se_len-sized chunk, walking the scatterlist from
 * its tail toward its head.  If @sign_extend is set and the final chunk
 * is short of @se_len, the chunk is sign-extended (filled with 0xff when
 * its most-significant byte has bit 7 set).
 *
 * Presumably used to convert big-endian operands (e.g. RSA key
 * material) into the little-endian chunked layout the device expects -
 * confirm against the callers.
 *
 * Returns 0 on success, -EINVAL if @se_len exceeds the bounce buffer.
 */
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   struct scatterlist *sg,
				   unsigned int len, unsigned int se_len,
				   bool sign_extend)
{
	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	if (WARN_ON(se_len > sizeof(buffer)))
		return -EINVAL;

	/* Walk the scatterlist backwards, filling the workarea forwards */
	sg_offset = len;
	dm_offset = 0;
	nbytes = len;
	while (nbytes) {
		sb_len = min_t(unsigned int, nbytes, se_len);
		sg_offset -= sb_len;

		/* Stage a chunk, then store it byte-reversed */
		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
		for (i = 0; i < sb_len; i++)
			wa->address[dm_offset + i] = buffer[sb_len - i - 1];

		dm_offset += sb_len;
		nbytes -= sb_len;

		if ((sb_len != se_len) && sign_extend) {
			/* Must sign-extend to nearest sign-extend length */
			if (wa->address[dm_offset - 1] & 0x80)
				memset(wa->address + dm_offset, 0xff,
				       se_len - sb_len);
		}
	}

	return 0;
}
221
/* Inverse of ccp_reverse_set_dm_area(): copy @len bytes from the
 * workarea out to the scatterlist, byte-reversing each chunk and
 * walking the workarea from its tail toward its head.
 */
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    struct scatterlist *sg,
				    unsigned int len)
{
	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	/* Walk the workarea backwards, filling the scatterlist forwards */
	sg_offset = 0;
	dm_offset = len;
	nbytes = len;
	while (nbytes) {
		sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
		dm_offset -= sb_len;

		/* Stage a reversed chunk, then push it to the scatterlist */
		for (i = 0; i < sb_len; i++)
			buffer[sb_len - i - 1] = wa->address[dm_offset + i];
		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);

		sg_offset += sb_len;
		nbytes -= sb_len;
	}
}
244
245static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
246{
247 ccp_dm_free(&data->dm_wa);
248 ccp_sg_free(&data->sg_wa);
249}
250
251static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
Tom Lendacky81a59f02014-01-06 13:34:17 -0600252 struct scatterlist *sg, u64 sg_len,
Tom Lendacky63b94502013-11-12 11:46:16 -0600253 unsigned int dm_len,
254 enum dma_data_direction dir)
255{
256 int ret;
257
258 memset(data, 0, sizeof(*data));
259
260 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
261 dir);
262 if (ret)
263 goto e_err;
264
265 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
266 if (ret)
267 goto e_err;
268
269 return 0;
270
271e_err:
272 ccp_free_data(data, cmd_q);
273
274 return ret;
275}
276
/* Move data between the scatterlist and the bounce buffer: @from == 0
 * fills the (pre-zeroed) buffer from the scatterlist, @from != 0 drains
 * the buffer back out to the scatterlist.  Returns the number of bytes
 * transferred and advances the scatterlist workarea accordingly.
 */
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		/* Limit each step to the current sg entry, the remaining
		 * buffer space and the bytes still outstanding.
		 */
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
311
/* Fill the bounce buffer from the source scatterlist; returns byte count */
static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}
316
/* Drain the bounce buffer to the destination scatterlist; returns byte count */
static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
321
/* Set up the DMA source (and optional destination) for the next piece
 * of an operation.  When the current sg entry holds at least
 * @block_size bytes the device DMAs it directly; otherwise the data is
 * staged through the workarea bounce buffer (and op->soc is set so the
 * operation completes before the buffer is reused).
 */
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		/* Round down to a whole number of blocks */
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
394
395static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
396 struct ccp_op *op)
397{
398 op->init = 0;
399
400 if (dst) {
401 if (op->dst.u.dma.address == dst->dm_wa.dma.address)
402 ccp_empty_queue_buf(dst);
403 else
404 ccp_update_sg_workarea(&dst->sg_wa,
405 op->dst.u.dma.length);
406 }
407}
408
/* Use the passthru engine to copy a workarea buffer into (@from ==
 * false) or out of (@from == true) an SB (storage block) entry,
 * applying the requested byte swap.  Returns the passthru operation's
 * result (0 on success).
 */
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		/* NOTE(review): soc is set only on the SB->memory direction,
		 * presumably so the read-back completes before the buffer is
		 * consumed - confirm against the passthru engine behavior.
		 */
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}
440
/* Copy a workarea buffer into an SB entry (system memory -> SB) */
static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}
447
/* Copy an SB entry back into a workarea buffer (SB -> system memory) */
static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
454
/* Run an AES-CMAC operation through the CCP.
 *
 * The key and IV are loaded into the queue's SB slots (byteswapped to
 * little endian), the source is streamed through the AES engine one
 * block-sized chunk at a time, and the resulting MAC context is read
 * back into aes->iv.  On the final chunk the caller-provided K1/K2
 * subkey replaces the context before the last engine pass.
 *
 * Returns 0 on success or a negative errno; hardware errors are
 * reported through cmd->engine_error.
 */
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	/* Validate the request: key size, whole-block source, full IV */
	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	/* The code below assumes key and context each occupy one SB slot */
	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Right-align the key within the SB entry */
	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
604
/* Run an AES encrypt/decrypt operation through the CCP (CMAC requests
 * are dispatched to ccp_run_aes_cmac_cmd()).
 *
 * The key (and, for non-ECB modes, the IV) are loaded into the queue's
 * SB slots, the data is streamed through the AES engine one block-sized
 * chunk at a time (in place when src and dst alias), and for non-ECB
 * modes the updated IV/context is read back into aes->iv.
 *
 * Returns 0 on success or a negative errno; hardware errors are
 * reported through cmd->engine_error.
 */
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	/* Validate the request: key size, block alignment for the
	 * whole-block modes, and a full IV for non-ECB modes.
	 */
	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	/* The code below assumes key and context each occupy one SB slot */
	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	/* ECB uses no chained context, so skip context initialization */
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Right-align the key within the SB entry */
	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	/* CFB (CFB128 only) and CTR express their width as size-in-bits - 1 */
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
780
/* Run an XTS-AES operation through the CCP.
 *
 * Only 128-bit keys are accepted here.  The two key halves are loaded
 * into a single SB entry (the second half via a second, offset
 * ccp_set_dm_area() call), the tweak/IV is loaded without byte swapping
 * (XTS context is already little endian), the data is streamed through
 * the engine in unit_size chunks (in place when src and dst alias), and
 * the updated tweak is read back into xts->iv.
 *
 * Returns 0 on success or a negative errno; hardware errors are
 * reported through cmd->engine_error.
 */
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	/* Translate the enumerated unit size into a byte count */
	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	/* The code below assumes key and context each occupy one SB slot */
	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Pack both XTS key halves into the single SB entry */
	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
943
/* ccp_run_sha_cmd - execute a SHA-1/SHA-224/SHA-256 operation on the CCP
 *
 * @cmd_q: command queue to submit the operation on
 * @cmd:   command descriptor; the SHA parameters live in cmd->u.sha
 *
 * Loads (or, for a first pass, initializes) the hash context into the
 * device's pre-allocated context storage block, streams the source
 * scatterlist through the SHA engine in block-size units, then reads the
 * context back.  On a final update the digest is copied into sha->ctx;
 * otherwise the intermediate context is stashed there for the next pass.
 * If sha->opad is supplied on a final update, the outer HMAC hash is
 * computed by recursively invoking this function on opad||digest.
 *
 * Returns 0 on success or a negative errno; device error codes are
 * reported through cmd->engine_error.
 */
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;	/* context placement within the SB entry */
	unsigned int digest_size;
	int sb_count;
	const void *init;		/* initial hash values for a first pass */
	u64 block_size;
	int ctx_size;
	int ret;

	/* Validate the hash type and make sure the caller's context buffer
	 * is large enough to hold the resulting digest.
	 */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	/* Non-final updates must supply whole blocks */
	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			/* Copy the well-known empty-message digest out */
			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		/* Non-v3 devices store the context at the tail of the entry */
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		/* SHA-224 uses a full SHA-256-sized working context */
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	default:
		/* NOTE(review): unreachable - sha->type was validated above.
		 * If it were reachable, this jump would bypass e_data's
		 * conditional ccp_free_data() of an uninitialized src.
		 */
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		/* First pass: seed the workarea with the standard IVs */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		default:
			/* Unreachable: sha->type validated above */
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			/* Flag end-of-message on the last unit of a final op */
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		/* Zero-length input (non-v3 device): single EOM-only op */
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		default:
			/* Unreachable: sha->type validated above */
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		/* Build opad || inner-digest as the outer hash message */
		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		default:
			/* NOTE(review): unreachable (type validated above);
			 * were it reachable, this path would leak hmac_buf
			 * and skip freeing src.
			 */
			ret = -EINVAL;
			goto e_ctx;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
1227
1228static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1229{
1230 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
1231 struct ccp_dm_workarea exp, src;
1232 struct ccp_data dst;
1233 struct ccp_op op;
Gary R Hook956ee212016-07-26 19:09:40 -05001234 unsigned int sb_count, i_len, o_len;
Tom Lendacky63b94502013-11-12 11:46:16 -06001235 int ret;
1236
1237 if (rsa->key_size > CCP_RSA_MAX_WIDTH)
1238 return -EINVAL;
1239
1240 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
1241 return -EINVAL;
1242
1243 /* The RSA modulus must precede the message being acted upon, so
1244 * it must be copied to a DMA area where the message and the
1245 * modulus can be concatenated. Therefore the input buffer
1246 * length required is twice the output buffer length (which
1247 * must be a multiple of 256-bits).
1248 */
1249 o_len = ((rsa->key_size + 255) / 256) * 32;
1250 i_len = o_len * 2;
1251
Gary R Hook956ee212016-07-26 19:09:40 -05001252 sb_count = o_len / CCP_SB_BYTES;
Tom Lendacky63b94502013-11-12 11:46:16 -06001253
1254 memset(&op, 0, sizeof(op));
1255 op.cmd_q = cmd_q;
1256 op.jobid = ccp_gen_jobid(cmd_q->ccp);
Gary R Hook58a690b2016-07-26 19:09:50 -05001257 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
1258
Gary R Hook956ee212016-07-26 19:09:40 -05001259 if (!op.sb_key)
Tom Lendacky63b94502013-11-12 11:46:16 -06001260 return -EIO;
1261
Gary R Hook956ee212016-07-26 19:09:40 -05001262 /* The RSA exponent may span multiple (32-byte) SB entries and must
Tom Lendacky63b94502013-11-12 11:46:16 -06001263 * be in little endian format. Reverse copy each 32-byte chunk
1264 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
1265 * and each byte within that chunk and do not perform any byte swap
1266 * operations on the passthru operation.
1267 */
1268 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1269 if (ret)
Gary R Hook956ee212016-07-26 19:09:40 -05001270 goto e_sb;
Tom Lendacky63b94502013-11-12 11:46:16 -06001271
Tom Lendacky355eba52015-10-01 16:32:31 -05001272 ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
Gary R Hook956ee212016-07-26 19:09:40 -05001273 CCP_SB_BYTES, false);
Tom Lendacky355eba52015-10-01 16:32:31 -05001274 if (ret)
1275 goto e_exp;
Gary R Hook956ee212016-07-26 19:09:40 -05001276 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
1277 CCP_PASSTHRU_BYTESWAP_NOOP);
Tom Lendacky63b94502013-11-12 11:46:16 -06001278 if (ret) {
1279 cmd->engine_error = cmd_q->cmd_error;
1280 goto e_exp;
1281 }
1282
1283 /* Concatenate the modulus and the message. Both the modulus and
1284 * the operands must be in little endian format. Since the input
1285 * is in big endian format it must be converted.
1286 */
1287 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
1288 if (ret)
1289 goto e_exp;
1290
Tom Lendacky355eba52015-10-01 16:32:31 -05001291 ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
Gary R Hook956ee212016-07-26 19:09:40 -05001292 CCP_SB_BYTES, false);
Tom Lendacky355eba52015-10-01 16:32:31 -05001293 if (ret)
1294 goto e_src;
Tom Lendacky63b94502013-11-12 11:46:16 -06001295 src.address += o_len; /* Adjust the address for the copy operation */
Tom Lendacky355eba52015-10-01 16:32:31 -05001296 ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
Gary R Hook956ee212016-07-26 19:09:40 -05001297 CCP_SB_BYTES, false);
Tom Lendacky355eba52015-10-01 16:32:31 -05001298 if (ret)
1299 goto e_src;
Tom Lendacky63b94502013-11-12 11:46:16 -06001300 src.address -= o_len; /* Reset the address to original value */
1301
1302 /* Prepare the output area for the operation */
1303 ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
1304 o_len, DMA_FROM_DEVICE);
1305 if (ret)
1306 goto e_src;
1307
1308 op.soc = 1;
1309 op.src.u.dma.address = src.dma.address;
1310 op.src.u.dma.offset = 0;
1311 op.src.u.dma.length = i_len;
1312 op.dst.u.dma.address = dst.dm_wa.dma.address;
1313 op.dst.u.dma.offset = 0;
1314 op.dst.u.dma.length = o_len;
1315
1316 op.u.rsa.mod_size = rsa->key_size;
1317 op.u.rsa.input_len = i_len;
1318
Gary R Hooka43eb982016-07-26 19:09:31 -05001319 ret = cmd_q->ccp->vdata->perform->rsa(&op);
Tom Lendacky63b94502013-11-12 11:46:16 -06001320 if (ret) {
1321 cmd->engine_error = cmd_q->cmd_error;
1322 goto e_dst;
1323 }
1324
1325 ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);
1326
1327e_dst:
1328 ccp_free_data(&dst, cmd_q);
1329
1330e_src:
1331 ccp_dm_free(&src);
1332
1333e_exp:
1334 ccp_dm_free(&exp);
1335
Gary R Hook956ee212016-07-26 19:09:40 -05001336e_sb:
Gary R Hook58a690b2016-07-26 19:09:50 -05001337 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
Tom Lendacky63b94502013-11-12 11:46:16 -06001338
1339 return ret;
1340}
1341
/* ccp_run_passthru_cmd - execute a pass-through (copy/bit-mask) operation
 *
 * @cmd_q: command queue to submit the operation on
 * @cmd:   command descriptor; the parameters live in cmd->u.passthru
 *
 * Optionally loads a bitwise mask into the queue's storage block, DMA-maps
 * the source and destination scatterlists (in place when they alias), and
 * streams the data through the pass-through engine one DMA segment at a
 * time.
 *
 * Returns 0 on success or a negative errno; device error codes are
 * reported through cmd->engine_error.
 */
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	/* Non-final lengths must be a multiple of the passthru block size */
	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	/* A bitwise operation requires a full-sized mask */
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		/* Last segment: flag end-of-message and stop-on-completion */
		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* Advance to the next dst entry once it has been filled.
		 * NOTE(review): the bookkeeping mixes sg->length with
		 * sg_dma_len() above - presumably equal here; verify if the
		 * mapping may merge entries.
		 */
		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
1471
Gary R Hook58ea8ab2016-04-18 09:21:44 -05001472static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
1473 struct ccp_cmd *cmd)
1474{
1475 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
1476 struct ccp_dm_workarea mask;
1477 struct ccp_op op;
1478 int ret;
1479
1480 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1481 return -EINVAL;
1482
1483 if (!pt->src_dma || !pt->dst_dma)
1484 return -EINVAL;
1485
1486 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1487 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1488 return -EINVAL;
1489 if (!pt->mask)
1490 return -EINVAL;
1491 }
1492
Gary R Hook956ee212016-07-26 19:09:40 -05001493 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
Gary R Hook58ea8ab2016-04-18 09:21:44 -05001494
1495 memset(&op, 0, sizeof(op));
1496 op.cmd_q = cmd_q;
1497 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1498
1499 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1500 /* Load the mask */
Gary R Hook956ee212016-07-26 19:09:40 -05001501 op.sb_key = cmd_q->sb_key;
Gary R Hook58ea8ab2016-04-18 09:21:44 -05001502
1503 mask.length = pt->mask_len;
1504 mask.dma.address = pt->mask;
1505 mask.dma.length = pt->mask_len;
1506
Gary R Hook956ee212016-07-26 19:09:40 -05001507 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
Gary R Hook58ea8ab2016-04-18 09:21:44 -05001508 CCP_PASSTHRU_BYTESWAP_NOOP);
1509 if (ret) {
1510 cmd->engine_error = cmd_q->cmd_error;
1511 return ret;
1512 }
1513 }
1514
1515 /* Send data to the CCP Passthru engine */
1516 op.eom = 1;
1517 op.soc = 1;
1518
1519 op.src.type = CCP_MEMTYPE_SYSTEM;
1520 op.src.u.dma.address = pt->src_dma;
1521 op.src.u.dma.offset = 0;
1522 op.src.u.dma.length = pt->src_len;
1523
1524 op.dst.type = CCP_MEMTYPE_SYSTEM;
1525 op.dst.u.dma.address = pt->dst_dma;
1526 op.dst.u.dma.offset = 0;
1527 op.dst.u.dma.length = pt->src_len;
1528
Gary R Hooka43eb982016-07-26 19:09:31 -05001529 ret = cmd_q->ccp->vdata->perform->passthru(&op);
Gary R Hook58ea8ab2016-04-18 09:21:44 -05001530 if (ret)
1531 cmd->engine_error = cmd_q->cmd_error;
1532
1533 return ret;
1534}
1535
/* ccp_run_ecc_mm_cmd - execute an ECC modular-math operation on the CCP
 *
 * @cmd_q: command queue to submit the operation on
 * @cmd:   command descriptor; the ECC parameters live in cmd->u.ecc
 *
 * Handles modular multiply/add/invert: byte-reverses the modulus and the
 * operand(s) into fixed-size slots of one DMA source buffer (modular
 * inversion takes a single operand), runs the ECC engine, checks the
 * device's success flag, and byte-reverses the result back to the caller.
 *
 * Returns 0 on success, -EIO if the device reports failure (the raw
 * status is left in ecc->ecc_result), or another negative errno.
 */
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;	/* original workarea address, restored after concatenation */

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	/* Modular inversion is unary; all other functions need operand_2 */
	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				      ecc->u.mm.operand_1_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					      ecc->u.mm.operand_2_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* The device reports success/failure in a little-endian status word */
	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
1644
/* ccp_run_ecc_pm_cmd - execute an ECC point-math operation on the CCP
 *
 * @cmd_q: command queue to submit the operation on
 * @cmd:   command descriptor; the ECC parameters live in cmd->u.ecc
 *
 * Handles point add/multiply/double: byte-reverses the modulus, the
 * input point(s) (with an implicit Z coordinate of 1), and - depending
 * on the function - the domain "a" parameter and scalar, into fixed-size
 * slots of one DMA source buffer; runs the ECC engine; checks the
 * device's success flag; then byte-reverses the result point's X and Y
 * coordinates back to the caller.
 *
 * Returns 0 on success, -EIO if the device reports failure (the raw
 * status is left in ecc->ecc_result), or another negative errno.
 */
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;	/* original workarea address, restored after each walk */

	/* Point 1 (X, Y) is required by every point-math function */
	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Point addition needs a second point ... */
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		/* ... multiply/double need the domain "a" parameter */
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				      ecc->u.pm.point_1.x_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				      ecc->u.pm.point_1.y_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					      ecc->u.pm.point_2.x_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					      ecc->u.pm.point_2.y_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					      ecc->u.pm.domain_a_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						      ecc->u.pm.scalar_len,
						      CCP_ECC_OPERAND_SIZE,
						      false);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* The device reports success/failure in a little-endian status word */
	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
1821
1822static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1823{
1824 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1825
1826 ecc->ecc_result = 0;
1827
1828 if (!ecc->mod ||
1829 (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
1830 return -EINVAL;
1831
1832 switch (ecc->function) {
1833 case CCP_ECC_FUNCTION_MMUL_384BIT:
1834 case CCP_ECC_FUNCTION_MADD_384BIT:
1835 case CCP_ECC_FUNCTION_MINV_384BIT:
1836 return ccp_run_ecc_mm_cmd(cmd_q, cmd);
1837
1838 case CCP_ECC_FUNCTION_PADD_384BIT:
1839 case CCP_ECC_FUNCTION_PMUL_384BIT:
1840 case CCP_ECC_FUNCTION_PDBL_384BIT:
1841 return ccp_run_ecc_pm_cmd(cmd_q, cmd);
1842
1843 default:
1844 return -EINVAL;
1845 }
1846}
1847
1848int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1849{
1850 int ret;
1851
1852 cmd->engine_error = 0;
1853 cmd_q->cmd_error = 0;
1854 cmd_q->int_rcvd = 0;
Gary R Hookbb4e89b2016-07-26 19:10:13 -05001855 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
Tom Lendacky63b94502013-11-12 11:46:16 -06001856
1857 switch (cmd->engine) {
1858 case CCP_ENGINE_AES:
1859 ret = ccp_run_aes_cmd(cmd_q, cmd);
1860 break;
1861 case CCP_ENGINE_XTS_AES_128:
1862 ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
1863 break;
1864 case CCP_ENGINE_SHA:
1865 ret = ccp_run_sha_cmd(cmd_q, cmd);
1866 break;
1867 case CCP_ENGINE_RSA:
1868 ret = ccp_run_rsa_cmd(cmd_q, cmd);
1869 break;
1870 case CCP_ENGINE_PASSTHRU:
Gary R Hook58ea8ab2016-04-18 09:21:44 -05001871 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
1872 ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
1873 else
1874 ret = ccp_run_passthru_cmd(cmd_q, cmd);
Tom Lendacky63b94502013-11-12 11:46:16 -06001875 break;
1876 case CCP_ENGINE_ECC:
1877 ret = ccp_run_ecc_cmd(cmd_q, cmd);
1878 break;
1879 default:
1880 ret = -EINVAL;
1881 }
1882
1883 return ret;
1884}