/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
        cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

#define CCP_NEW_JOBID(ccp)      ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
                                        ccp_gen_jobid(ccp) : 0)

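/* Job IDs: CCP_NEW_JOBID() above suggests that only the version 3 device
 * needs a real per-command ID (it reports completions by job ID);
 * ccp_gen_jobid() below draws one from a wrapping atomic counter, masked
 * to the field width, while later devices simply get 0.
 */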
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
        return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
        if (wa->dma_count)
                dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

        wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
                                struct scatterlist *sg, u64 len,
                                enum dma_data_direction dma_dir)
{
        int nents;

        memset(wa, 0, sizeof(*wa));

        wa->sg = sg;
        if (!sg)
                return 0;

        /* sg_nents_for_len() returns a negative errno on failure; test it
         * in a signed local before storing the count in the workarea.
         */
        nents = sg_nents_for_len(sg, len);
        if (nents < 0)
                return nents;
        wa->nents = nents;

        wa->bytes_left = len;
        wa->sg_used = 0;

        if (len == 0)
                return 0;

        if (dma_dir == DMA_NONE)
                return 0;

        wa->dma_sg = sg;
        wa->dma_dev = dev;
        wa->dma_dir = dma_dir;
        wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
        if (!wa->dma_count)
                return -ENOMEM;

        return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
        unsigned int nbytes = min_t(u64, len, wa->bytes_left);

        if (!wa->sg)
                return;

        wa->sg_used += nbytes;
        wa->bytes_left -= nbytes;
        if (wa->sg_used == wa->sg->length) {
                wa->sg = sg_next(wa->sg);
                wa->sg_used = 0;
        }
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
        if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
                if (wa->address)
                        dma_pool_free(wa->dma_pool, wa->address,
                                      wa->dma.address);
        } else {
                if (wa->dma.address)
                        dma_unmap_single(wa->dev, wa->dma.address, wa->length,
                                         wa->dma.dir);
                kfree(wa->address);
        }

        wa->address = NULL;
        wa->dma.address = 0;
}

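/* Allocation strategy for "DM" workareas (summarizing the code below):
 * buffers up to CCP_DMAPOOL_MAX_SIZE come from the queue's coherent DMA
 * pool, anything larger is kzalloc'd and streaming-mapped with
 * dma_map_single() - which is why ccp_dm_free() above selects its release
 * path based on wa->length.
 */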
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
                                struct ccp_cmd_queue *cmd_q,
                                unsigned int len,
                                enum dma_data_direction dir)
{
        memset(wa, 0, sizeof(*wa));

        if (!len)
                return 0;

        wa->dev = cmd_q->ccp->dev;
        wa->length = len;

        if (len <= CCP_DMAPOOL_MAX_SIZE) {
                wa->dma_pool = cmd_q->dma_pool;

                wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
                                             &wa->dma.address);
                if (!wa->address)
                        return -ENOMEM;

                wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

                memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
        } else {
                wa->address = kzalloc(len, GFP_KERNEL);
                if (!wa->address)
                        return -ENOMEM;

                wa->dma.address = dma_map_single(wa->dev, wa->address, len,
                                                 dir);
                /* A failed mapping is signalled via dma_mapping_error(),
                 * not necessarily by a zero handle; clear the handle so
                 * ccp_dm_free() does not try to unmap it.
                 */
                if (dma_mapping_error(wa->dev, wa->dma.address)) {
                        wa->dma.address = 0;
                        return -ENOMEM;
                }

                wa->dma.length = len;
        }
        wa->dma.dir = dir;

        return 0;
}

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
                            struct scatterlist *sg, unsigned int sg_offset,
                            unsigned int len)
{
        WARN_ON(!wa->address);

        scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
                                 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
                            struct scatterlist *sg, unsigned int sg_offset,
                            unsigned int len)
{
        WARN_ON(!wa->address);

        scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
                                 1);
}

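/* Worked example for the reverse copy below (illustrative values only): a
 * 6-byte big-endian operand { 11 22 33 44 55 66 } copied with se_len == 32
 * is read as a single chunk from the tail of the scatterlist and lands
 * byte-reversed at the start of the workarea as { 66 55 44 33 22 11 },
 * i.e. the little-endian layout the engine expects.
 */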
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
                                   struct scatterlist *sg,
                                   unsigned int len, unsigned int se_len,
                                   bool sign_extend)
{
        unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
        u8 buffer[CCP_REVERSE_BUF_SIZE];

        if (WARN_ON(se_len > sizeof(buffer)))
                return -EINVAL;

        sg_offset = len;
        dm_offset = 0;
        nbytes = len;
        while (nbytes) {
                sb_len = min_t(unsigned int, nbytes, se_len);
                sg_offset -= sb_len;

                scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
                for (i = 0; i < sb_len; i++)
                        wa->address[dm_offset + i] = buffer[sb_len - i - 1];

                dm_offset += sb_len;
                nbytes -= sb_len;

                if ((sb_len != se_len) && sign_extend) {
                        /* Must sign-extend to nearest sign-extend length */
                        if (wa->address[dm_offset - 1] & 0x80)
                                memset(wa->address + dm_offset, 0xff,
                                       se_len - sb_len);
                }
        }

        return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
                                    struct scatterlist *sg,
                                    unsigned int len)
{
        unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
        u8 buffer[CCP_REVERSE_BUF_SIZE];

        sg_offset = 0;
        dm_offset = len;
        nbytes = len;
        while (nbytes) {
                sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
                dm_offset -= sb_len;

                for (i = 0; i < sb_len; i++)
                        buffer[sb_len - i - 1] = wa->address[dm_offset + i];
                scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);

                sg_offset += sb_len;
                nbytes -= sb_len;
        }
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
        ccp_dm_free(&data->dm_wa);
        ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
                         struct scatterlist *sg, u64 sg_len,
                         unsigned int dm_len,
                         enum dma_data_direction dir)
{
        int ret;

        memset(data, 0, sizeof(*data));

        ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
                                   dir);
        if (ret)
                goto e_err;

        ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
        if (ret)
                goto e_err;

        return 0;

e_err:
        ccp_free_data(data, cmd_q);

        return ret;
}

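/* ccp_queue_buf() below is the bounce-buffer primitive: with from == 0 it
 * gathers up to dm_wa->length bytes from the scatterlist into the DMA
 * workarea (fill), with from == 1 it scatters the workarea back out
 * (empty); the two wrappers that follow simply fix the direction.
 */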
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
        struct ccp_sg_workarea *sg_wa = &data->sg_wa;
        struct ccp_dm_workarea *dm_wa = &data->dm_wa;
        unsigned int buf_count, nbytes;

        /* Clear the buffer if setting it */
        if (!from)
                memset(dm_wa->address, 0, dm_wa->length);

        if (!sg_wa->sg)
                return 0;

        /* Perform the copy operation
         *   nbytes will always be <= UINT_MAX because dm_wa->length is
         *   an unsigned int
         */
        nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
        scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
                                 nbytes, from);

        /* Update the structures and generate the count */
        buf_count = 0;
        while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
                nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
                             dm_wa->length - buf_count);
                nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

                buf_count += nbytes;
                ccp_update_sg_workarea(sg_wa, nbytes);
        }

        return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
        return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
        return ccp_queue_buf(data, 1);
}

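/* ccp_prepare_data() below sizes a single hardware operation. Each op can
 * DMA from/to only one address, so when the current scatterlist fragment
 * holds less than a full block the data is bounced through the dm workarea
 * instead, and op->soc is set so the queue waits for completion before the
 * bounce buffer is reused.
 */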
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
                             struct ccp_op *op, unsigned int block_size,
                             bool blocksize_op)
{
        unsigned int sg_src_len, sg_dst_len, op_len;

        /* The CCP can only DMA from/to one address each per operation. This
         * requires that we find the smallest DMA area between the source
         * and destination. The resulting len values will always be <= UINT_MAX
         * because the dma length is an unsigned int.
         */
        sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
        sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

        if (dst) {
                sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
                sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
                op_len = min(sg_src_len, sg_dst_len);
        } else {
                op_len = sg_src_len;
        }

        /* The data operation length will be at least block_size in length
         * or the smaller of available sg room remaining for the source or
         * the destination
         */
        op_len = max(op_len, block_size);

        /* Unless we have to buffer data, there's no reason to wait */
        op->soc = 0;

        if (sg_src_len < block_size) {
                /* Not enough data in the sg element, so it
                 * needs to be buffered into a blocksize chunk
                 */
                int cp_len = ccp_fill_queue_buf(src);

                op->soc = 1;
                op->src.u.dma.address = src->dm_wa.dma.address;
                op->src.u.dma.offset = 0;
                op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
        } else {
                /* Enough data in the sg element, but we need to
                 * adjust for any previously copied data
                 */
                op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
                op->src.u.dma.offset = src->sg_wa.sg_used;
                op->src.u.dma.length = op_len & ~(block_size - 1);

                ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
        }

        if (dst) {
                if (sg_dst_len < block_size) {
                        /* Not enough room in the sg element or we're on the
                         * last piece of data (when using padding), so the
                         * output needs to be buffered into a blocksize chunk
                         */
                        op->soc = 1;
                        op->dst.u.dma.address = dst->dm_wa.dma.address;
                        op->dst.u.dma.offset = 0;
                        op->dst.u.dma.length = op->src.u.dma.length;
                } else {
                        /* Enough room in the sg element, but we need to
                         * adjust for any previously used area
                         */
                        op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
                        op->dst.u.dma.offset = dst->sg_wa.sg_used;
                        op->dst.u.dma.length = op->src.u.dma.length;
                }
        }
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
                             struct ccp_op *op)
{
        op->init = 0;

        if (dst) {
                if (op->dst.u.dma.address == dst->dm_wa.dma.address)
                        ccp_empty_queue_buf(dst);
                else
                        ccp_update_sg_workarea(&dst->sg_wa,
                                               op->dst.u.dma.length);
        }
}

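/* The helpers below shuttle a workarea to or from one of the device's
 * local storage block (SB) entries using a passthru operation; byte_swap
 * selects the endianness fixup (e.g. CCP_PASSTHRU_BYTESWAP_256BIT for
 * big-endian keys and contexts, _NOOP when the data is already little
 * endian).
 */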
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
                               struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                               u32 byte_swap, bool from)
{
        struct ccp_op op;

        memset(&op, 0, sizeof(op));

        op.cmd_q = cmd_q;
        op.jobid = jobid;
        op.eom = 1;

        if (from) {
                op.soc = 1;
                op.src.type = CCP_MEMTYPE_SB;
                op.src.u.sb = sb;
                op.dst.type = CCP_MEMTYPE_SYSTEM;
                op.dst.u.dma.address = wa->dma.address;
                op.dst.u.dma.length = wa->length;
        } else {
                op.src.type = CCP_MEMTYPE_SYSTEM;
                op.src.u.dma.address = wa->dma.address;
                op.src.u.dma.length = wa->length;
                op.dst.type = CCP_MEMTYPE_SB;
                op.dst.u.sb = sb;
        }

        op.u.passthru.byte_swap = byte_swap;

        return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
                          struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                          u32 byte_swap)
{
        return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
                            struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                            u32 byte_swap)
{
        return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

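/* AES-CMAC flow (as implemented below): the message is streamed through
 * the AES engine block by block, and on the final block the caller's
 * pre-computed CMAC subkey (K1 or K2, per the existing "Push the K1/K2
 * key" comment) is loaded into the context SB so the engine can fold it
 * into the last block; the MAC is then read back from the IV slot.
 */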
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
                                struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src;
        struct ccp_op op;
        unsigned int dm_offset;
        int ret;

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (aes->src_len & (AES_BLOCK_SIZE - 1))
                return -EINVAL;

        if (aes->iv_len != AES_BLOCK_SIZE)
                return -EINVAL;

        if (!aes->key || !aes->iv || !aes->src)
                return -EINVAL;

        if (aes->cmac_final) {
                if (aes->cmac_key_len != AES_BLOCK_SIZE)
                        return -EINVAL;

                if (!aes->cmac_key)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = aes->mode;
        op.u.aes.action = aes->action;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        /* Send data to the CCP AES engine */
        ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
                            AES_BLOCK_SIZE, DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
                if (aes->cmac_final && !src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Push the K1/K2 key to the CCP now */
                        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
                                               op.sb_ctx,
                                               CCP_PASSTHRU_BYTESWAP_256BIT);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_src;
                        }

                        ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
                                        aes->cmac_key_len);
                        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                             CCP_PASSTHRU_BYTESWAP_256BIT);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_src;
                        }
                }

                ret = cmd_q->ccp->vdata->perform->aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_src;
                }

                ccp_process_data(&src, NULL, &op);
        }

        /* Retrieve the AES context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_src;
        }

        /* ...but we only need AES_BLOCK_SIZE bytes */
        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

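/* ccp_run_aes_cmd() covers the non-CMAC modes: the key and, for all modes
 * except ECB, the IV are byte-swapped into SB entries, data is streamed a
 * block at a time, and for IV-bearing modes the updated IV is read back so
 * a chained call can continue the stream.
 */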
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int dm_offset;
        bool in_place = false;
        int ret;

        if (aes->mode == CCP_AES_MODE_CMAC)
                return ccp_run_aes_cmac_cmd(cmd_q, cmd);

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (((aes->mode == CCP_AES_MODE_ECB) ||
             (aes->mode == CCP_AES_MODE_CBC) ||
             (aes->mode == CCP_AES_MODE_CFB)) &&
            (aes->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (!aes->key || !aes->src || !aes->dst)
                return -EINVAL;

        if (aes->mode != CCP_AES_MODE_ECB) {
                if (aes->iv_len != AES_BLOCK_SIZE)
                        return -EINVAL;

                if (!aes->iv)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = aes->mode;
        op.u.aes.action = aes->action;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        if (aes->mode != CCP_AES_MODE_ECB) {
                /* Load the AES context - convert to LE */
                dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
                ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
                ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                     CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_ctx;
                }
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(aes->src) == sg_virt(aes->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
                            AES_BLOCK_SIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
                                    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP AES engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
                if (!src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Since we don't retrieve the AES context in ECB
                         * mode we have to wait for the operation to complete
                         * on the last piece of data
                         */
                        if (aes->mode == CCP_AES_MODE_ECB)
                                op.soc = 1;
                }

                ret = cmd_q->ccp->vdata->perform->aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        if (aes->mode != CCP_AES_MODE_ECB) {
                /* Retrieve the AES context - convert from LE to BE using
                 * 32-byte (256-bit) byteswapping
                 */
                ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                       CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                /* ...but we only need AES_BLOCK_SIZE bytes */
                dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
                ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
        }

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

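/* XTS-AES (below): only 128-bit keys are accepted, so both halves of the
 * XTS key pair fit together in the single 32-byte key SB entry (hence the
 * two ccp_set_dm_area() calls); the tweak (IV) is already little endian,
 * which is why it is loaded with the _NOOP byteswap. unit_size is the
 * data-unit (sector) size over which each tweak applies.
 */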
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
                               struct ccp_cmd *cmd)
{
        struct ccp_xts_aes_engine *xts = &cmd->u.xts;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int unit_size, dm_offset;
        bool in_place = false;
        int ret;

        switch (xts->unit_size) {
        case CCP_XTS_AES_UNIT_SIZE_16:
                unit_size = 16;
                break;
        case CCP_XTS_AES_UNIT_SIZE_512:
                unit_size = 512;
                break;
        case CCP_XTS_AES_UNIT_SIZE_1024:
                unit_size = 1024;
                break;
        case CCP_XTS_AES_UNIT_SIZE_2048:
                unit_size = 2048;
                break;
        case CCP_XTS_AES_UNIT_SIZE_4096:
                unit_size = 4096;
                break;
        default:
                return -EINVAL;
        }

        if (xts->key_len != AES_KEYSIZE_128)
                return -EINVAL;

        if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (xts->iv_len != AES_BLOCK_SIZE)
                return -EINVAL;

        if (!xts->key || !xts->iv || !xts->src || !xts->dst)
                return -EINVAL;

        BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = 1;
        op.u.xts.action = xts->action;
        op.u.xts.unit_size = xts->unit_size;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
        ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
        ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * for XTS is already in little endian format so no byte swapping
         * is needed.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(xts->src) == sg_virt(xts->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
                            unit_size,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
                                    unit_size, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP AES engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, unit_size, true);
                if (!src.sg_wa.bytes_left)
                        op.eom = 1;

                ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        /* Retrieve the AES context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        /* ...but we only need AES_BLOCK_SIZE bytes */
        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

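/* SHA (below): the engine consumes whole blocks, so non-final updates must
 * be block-aligned, and the v3 hardware cannot hash a zero-length final
 * fragment - which is why the known empty-message digests are copied out
 * directly in that case.
 */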
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_sha_engine *sha = &cmd->u.sha;
        struct ccp_dm_workarea ctx;
        struct ccp_data src;
        struct ccp_op op;
        unsigned int ioffset, ooffset;
        unsigned int digest_size;
        int sb_count;
        const void *init;
        u64 block_size;
        int ctx_size;
        int ret;

        switch (sha->type) {
        case CCP_SHA_TYPE_1:
                if (sha->ctx_len < SHA1_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA1_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_224:
                if (sha->ctx_len < SHA224_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA224_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_256:
                if (sha->ctx_len < SHA256_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA256_BLOCK_SIZE;
                break;
        default:
                return -EINVAL;
        }

        if (!sha->ctx)
                return -EINVAL;

        if (!sha->final && (sha->src_len & (block_size - 1)))
                return -EINVAL;

        /* The version 3 device can't handle zero-length input */
        if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

                if (!sha->src_len) {
                        unsigned int digest_len;
                        const u8 *sha_zero;

                        /* Not final, just return */
                        if (!sha->final)
                                return 0;

                        /* CCP can't do a zero length sha operation so the
                         * caller must buffer the data.
                         */
                        if (sha->msg_bits)
                                return -EINVAL;

                        /* A sha operation for a message with a total
                         * length of zero is valid, however, so known
                         * values are used to supply the result.
                         */
                        switch (sha->type) {
                        case CCP_SHA_TYPE_1:
                                sha_zero = sha1_zero_message_hash;
                                digest_len = SHA1_DIGEST_SIZE;
                                break;
                        case CCP_SHA_TYPE_224:
                                sha_zero = sha224_zero_message_hash;
                                digest_len = SHA224_DIGEST_SIZE;
                                break;
                        case CCP_SHA_TYPE_256:
                                sha_zero = sha256_zero_message_hash;
                                digest_len = SHA256_DIGEST_SIZE;
                                break;
                        default:
                                return -EINVAL;
                        }

                        scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
                                                 digest_len, 1);

                        return 0;
                }
        }

        /* Set variables used throughout */
        switch (sha->type) {
        case CCP_SHA_TYPE_1:
                digest_size = SHA1_DIGEST_SIZE;
                init = (void *) ccp_sha1_init;
                ctx_size = SHA1_DIGEST_SIZE;
                sb_count = 1;
                if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
                        ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
                else
                        ooffset = ioffset = 0;
                break;
        case CCP_SHA_TYPE_224:
                digest_size = SHA224_DIGEST_SIZE;
                init = (void *) ccp_sha224_init;
                ctx_size = SHA256_DIGEST_SIZE;
                sb_count = 1;
                ioffset = 0;
                if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
                        ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
                else
                        ooffset = 0;
                break;
        case CCP_SHA_TYPE_256:
                digest_size = SHA256_DIGEST_SIZE;
                init = (void *) ccp_sha256_init;
                ctx_size = SHA256_DIGEST_SIZE;
                sb_count = 1;
                ooffset = ioffset = 0;
                break;
        default:
                /* Nothing has been allocated at this point, so return
                 * directly; jumping to e_data here would free an
                 * uninitialized src workarea.
                 */
                return -EINVAL;
        }

        /* For zero-length plaintext the src pointer is ignored;
         * otherwise both parts must be valid
         */
        if (sha->src_len && !sha->src)
                return -EINVAL;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
        op.u.sha.type = sha->type;
        op.u.sha.msg_bits = sha->msg_bits;

        ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                return ret;
        if (sha->first) {
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        memcpy(ctx.address + ioffset, init, ctx_size);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }
        } else {
                /* Restore the context */
                ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
                                sb_count * CCP_SB_BYTES);
        }

        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        if (sha->src) {
                /* Send data to the CCP SHA engine; block_size is set above */
                ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
                                    block_size, DMA_TO_DEVICE);
                if (ret)
                        goto e_ctx;

                while (src.sg_wa.bytes_left) {
                        ccp_prepare_data(&src, NULL, &op, block_size, false);
                        if (sha->final && !src.sg_wa.bytes_left)
                                op.eom = 1;

                        ret = cmd_q->ccp->vdata->perform->sha(&op);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_data;
                        }

                        ccp_process_data(&src, NULL, &op);
                }
        } else {
                op.eom = 1;
                ret = cmd_q->ccp->vdata->perform->sha(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_data;
                }
        }

        /* Retrieve the SHA context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_data;
        }

        if (sha->final) {
                /* Finishing up, so get the digest */
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        ccp_get_dm_area(&ctx, ooffset,
                                        sha->ctx, 0,
                                        digest_size);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }
        } else {
                /* Stash the context */
                ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
                                sb_count * CCP_SB_BYTES);
        }

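/* HMAC finalization (below): with the inner digest available, the outer
 * hash H(opad || inner_digest) is computed by recursing into
 * ccp_run_sha_cmd() with a synthetic one-shot command whose message is
 * block_size + digest_size bytes, e.g. msg_bits = (64 + 32) * 8 = 768 for
 * SHA-256.
 */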
        if (sha->final && sha->opad) {
                /* HMAC operation, recursively perform final SHA */
                struct ccp_cmd hmac_cmd;
                struct scatterlist sg;
                u8 *hmac_buf;

                if (sha->opad_len != block_size) {
                        ret = -EINVAL;
                        goto e_data;
                }

                hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
                if (!hmac_buf) {
                        ret = -ENOMEM;
                        goto e_data;
                }
                sg_init_one(&sg, hmac_buf, block_size + digest_size);

                scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        memcpy(hmac_buf + block_size,
                               ctx.address + ooffset,
                               digest_size);
                        break;
                default:
                        /* Unreachable for the validated types, but free
                         * hmac_buf and exit through e_data so nothing
                         * leaks.
                         */
                        kfree(hmac_buf);
                        ret = -EINVAL;
                        goto e_data;
                }

                memset(&hmac_cmd, 0, sizeof(hmac_cmd));
                hmac_cmd.engine = CCP_ENGINE_SHA;
                hmac_cmd.u.sha.type = sha->type;
                hmac_cmd.u.sha.ctx = sha->ctx;
                hmac_cmd.u.sha.ctx_len = sha->ctx_len;
                hmac_cmd.u.sha.src = &sg;
                hmac_cmd.u.sha.src_len = block_size + digest_size;
                hmac_cmd.u.sha.opad = NULL;
                hmac_cmd.u.sha.opad_len = 0;
                hmac_cmd.u.sha.first = 1;
                hmac_cmd.u.sha.final = 1;
                hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

                ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
                if (ret)
                        cmd->engine_error = hmac_cmd.engine_error;

                kfree(hmac_buf);
        }

e_data:
        if (sha->src)
                ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

        return ret;
}

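/* RSA sizing (below): o_len rounds the key size up to whole 256-bit words,
 * expressed in bytes - a 2048-bit key gives o_len = ((2048 + 255) / 256) *
 * 32 = 256 bytes, i_len = 512 bytes for the concatenated modulus-plus-
 * message input, and sb_count = 256 / 32 = 8 SB entries for the exponent.
 */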
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_rsa_engine *rsa = &cmd->u.rsa;
        struct ccp_dm_workarea exp, src;
        struct ccp_data dst;
        struct ccp_op op;
        unsigned int sb_count, i_len, o_len;
        int ret;

        if (rsa->key_size > CCP_RSA_MAX_WIDTH)
                return -EINVAL;

        if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
                return -EINVAL;

        /* The RSA modulus must precede the message being acted upon, so
         * it must be copied to a DMA area where the message and the
         * modulus can be concatenated.  Therefore the input buffer
         * length required is twice the output buffer length (which
         * must be a multiple of 256-bits).
         */
        o_len = ((rsa->key_size + 255) / 256) * 32;
        i_len = o_len * 2;

        sb_count = o_len / CCP_SB_BYTES;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = ccp_gen_jobid(cmd_q->ccp);
        op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);

        if (!op.sb_key)
                return -EIO;

        /* The RSA exponent may span multiple (32-byte) SB entries and must
         * be in little endian format. Reverse copy each 32-byte chunk
         * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
         * and each byte within that chunk and do not perform any byte swap
         * operations on the passthru operation.
         */
        ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
        if (ret)
                goto e_sb;

        ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
                                      CCP_SB_BYTES, false);
        if (ret)
                goto e_exp;
        ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_exp;
        }

        /* Concatenate the modulus and the message. Both the modulus and
         * the operands must be in little endian format.  Since the input
         * is in big endian format it must be converted.
         */
        ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
        if (ret)
                goto e_exp;

        ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
                                      CCP_SB_BYTES, false);
        if (ret)
                goto e_src;
        src.address += o_len;   /* Adjust the address for the copy operation */
        ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
                                      CCP_SB_BYTES, false);
        /* Reset the address before checking for failure so that the error
         * path frees the original pointer, not the shifted one.
         */
        src.address -= o_len;
        if (ret)
                goto e_src;

        /* Prepare the output area for the operation */
        ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
                            o_len, DMA_FROM_DEVICE);
        if (ret)
                goto e_src;

        op.soc = 1;
        op.src.u.dma.address = src.dma.address;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = i_len;
        op.dst.u.dma.address = dst.dm_wa.dma.address;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = o_len;

        op.u.rsa.mod_size = rsa->key_size;
        op.u.rsa.input_len = i_len;

        ret = cmd_q->ccp->vdata->perform->rsa(&op);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);

e_dst:
        ccp_free_data(&dst, cmd_q);

e_src:
        ccp_dm_free(&src);

e_exp:
        ccp_dm_free(&exp);

e_sb:
        cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

        return ret;
}

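/* Passthru (below) is a plain memory-to-memory move through the device,
 * optionally applying the bitwise mask loaded into an SB entry; bit_mod
 * selects the operation (AND/OR/XOR per enum ccp_passthru_bitwise, with
 * _NOOP meaning no mask at all).
 */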
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
                                struct ccp_cmd *cmd)
{
        struct ccp_passthru_engine *pt = &cmd->u.passthru;
        struct ccp_dm_workarea mask;
        struct ccp_data src, dst;
        struct ccp_op op;
        bool in_place = false;
        unsigned int i;
        int ret = 0;

        if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
                return -EINVAL;

        if (!pt->src || !pt->dst)
                return -EINVAL;

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
                        return -EINVAL;
                if (!pt->mask)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                /* Load the mask */
                op.sb_key = cmd_q->sb_key;

                ret = ccp_init_dm_workarea(&mask, cmd_q,
                                           CCP_PASSTHRU_SB_COUNT *
                                           CCP_SB_BYTES,
                                           DMA_TO_DEVICE);
                if (ret)
                        return ret;

                ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
                ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
                                     CCP_PASSTHRU_BYTESWAP_NOOP);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_mask;
                }
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(pt->src) == sg_virt(pt->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
                            CCP_PASSTHRU_MASKSIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_mask;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
                                    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP Passthru engine
         *   Because the CCP engine works on a single source and destination
         *   dma address at a time, each entry in the source scatterlist
         *   (after the dma_map_sg call) must be less than or equal to the
         *   (remaining) length in the destination scatterlist entry and the
         *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
         */
        dst.sg_wa.sg_used = 0;
        for (i = 1; i <= src.sg_wa.dma_count; i++) {
                if (!dst.sg_wa.sg ||
                    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
                        ret = -EINVAL;
                        goto e_dst;
                }

                if (i == src.sg_wa.dma_count) {
                        op.eom = 1;
                        op.soc = 1;
                }

                op.src.type = CCP_MEMTYPE_SYSTEM;
                op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
                op.src.u.dma.offset = 0;
                op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

                op.dst.type = CCP_MEMTYPE_SYSTEM;
                op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
                op.dst.u.dma.offset = dst.sg_wa.sg_used;
                op.dst.u.dma.length = op.src.u.dma.length;

                ret = cmd_q->ccp->vdata->perform->passthru(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                dst.sg_wa.sg_used += src.sg_wa.sg->length;
                if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
                        dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
                        dst.sg_wa.sg_used = 0;
                }
                src.sg_wa.sg = sg_next(src.sg_wa.sg);
        }

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_mask:
        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                ccp_dm_free(&mask);

        return ret;
}

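/* The _nomap variant (below) trusts the caller to have performed all DMA
 * mapping already: src_dma, dst_dma and pt->mask are device addresses, so
 * the mask workarea is pointed directly at pt->mask instead of being
 * allocated and copied through ccp_set_dm_area().
 */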
static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}
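
	/* The mask is staged through a reserved storage block; the engine
	 * sets aside exactly one SB slot for pass-through operations
	 */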
	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
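	/* Resulting src layout (each field CCP_ECC_OPERAND_SIZE bytes,
	 * little endian):
	 *
	 *   | modulus | operand 1 | operand 2 (absent for MINV) |
	 */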
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				      ecc->u.mm.operand_1_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					      ecc->u.mm.operand_2_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
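
/* Illustrative sketch, not part of the driver proper: queuing a 384-bit
 * modular multiply through the public ccp_enqueue_cmd() interface.  The
 * operands are big-endian buffers wrapped in caller-provided scatterlists;
 * the function name and parameters are assumptions for the example, and
 * everything must remain valid until the completion callback runs.
 */
static int __maybe_unused ccp_example_ecc_mmul(struct ccp_cmd *cmd,
					       struct scatterlist *mod, u32 mod_len,
					       struct scatterlist *op_1, u32 op_1_len,
					       struct scatterlist *op_2, u32 op_2_len,
					       struct scatterlist *result,
					       void (*done)(void *data, int err),
					       void *data)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_ECC;
	cmd->u.ecc.function = CCP_ECC_FUNCTION_MMUL_384BIT;
	cmd->u.ecc.mod = mod;
	cmd->u.ecc.mod_len = mod_len;		/* <= CCP_ECC_MODULUS_BYTES */
	cmd->u.ecc.u.mm.operand_1 = op_1;
	cmd->u.ecc.u.mm.operand_1_len = op_1_len;
	cmd->u.ecc.u.mm.operand_2 = op_2;
	cmd->u.ecc.u.mm.operand_2_len = op_2_len;
	cmd->u.ecc.u.mm.result = result;
	cmd->u.ecc.u.mm.result_len = CCP_ECC_MODULUS_BYTES;
	cmd->callback = done;
	cmd->data = data;

	/* -EINPROGRESS indicates the command was queued successfully */
	return ccp_enqueue_cmd(cmd);
}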

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
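	/* Resulting src layout (each field CCP_ECC_OPERAND_SIZE bytes,
	 * little endian):
	 *
	 *   point add:  | modulus | P1.x | P1.y | 1 | P2.x | P2.y | 1 |
	 *   otherwise:  | modulus | P1.x | P1.y | 1 | a | scalar (PMUL) |
	 */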
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinates */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				      ecc->u.pm.point_1.x_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				      ecc->u.pm.point_1.y_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinates */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					      ecc->u.pm.point_2.x_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					      ecc->u.pm.point_2.y_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					      ecc->u.pm.domain_a_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						      ecc->u.pm.scalar_len,
						      CCP_ECC_OPERAND_SIZE,
						      false);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
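
/* Illustrative sketch, not part of the driver proper: a 384-bit point
 * multiply (k * P) queued through ccp_enqueue_cmd(), the building block for
 * ECDH-style computations.  All inputs are big-endian buffers in
 * caller-provided scatterlists; names are assumptions for the example only.
 */
static int __maybe_unused ccp_example_ecc_pmul(struct ccp_cmd *cmd,
					       struct scatterlist *mod, u32 mod_len,
					       struct scatterlist *px, u32 px_len,
					       struct scatterlist *py, u32 py_len,
					       struct scatterlist *a, u32 a_len,
					       struct scatterlist *k, u32 k_len,
					       struct scatterlist *rx,
					       struct scatterlist *ry,
					       void (*done)(void *data, int err),
					       void *data)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_ECC;
	cmd->u.ecc.function = CCP_ECC_FUNCTION_PMUL_384BIT;
	cmd->u.ecc.mod = mod;
	cmd->u.ecc.mod_len = mod_len;
	cmd->u.ecc.u.pm.point_1.x = px;
	cmd->u.ecc.u.pm.point_1.x_len = px_len;
	cmd->u.ecc.u.pm.point_1.y = py;
	cmd->u.ecc.u.pm.point_1.y_len = py_len;
	cmd->u.ecc.u.pm.domain_a = a;
	cmd->u.ecc.u.pm.domain_a_len = a_len;
	cmd->u.ecc.u.pm.scalar = k;
	cmd->u.ecc.u.pm.scalar_len = k_len;
	cmd->u.ecc.u.pm.result.x = rx;
	cmd->u.ecc.u.pm.result.x_len = CCP_ECC_MODULUS_BYTES;
	cmd->u.ecc.u.pm.result.y = ry;
	cmd->u.ecc.u.pm.result.y_len = CCP_ECC_MODULUS_BYTES;
	cmd->callback = done;
	cmd->data = data;

	return ccp_enqueue_cmd(cmd);
}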

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
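
/* Execute a single command on the given command queue, dispatching on the
 * requested engine and capturing any engine error in cmd->engine_error.
 */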
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}