crypto/ccp: support AES-GCM
[dpdk.git] drivers/crypto/ccp/ccp_crypto.c
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  */
4
5 #include <dirent.h>
6 #include <fcntl.h>
7 #include <stdio.h>
8 #include <string.h>
9 #include <sys/mman.h>
10 #include <sys/queue.h>
11 #include <sys/types.h>
12 #include <unistd.h>
13 #include <openssl/cmac.h> /* sub key apis */
14 #include <openssl/evp.h> /* sub key apis */
15
16 #include <rte_hexdump.h>
17 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_memory.h>
20 #include <rte_spinlock.h>
21 #include <rte_string_fns.h>
22 #include <rte_cryptodev_pmd.h>
23
24 #include "ccp_dev.h"
25 #include "ccp_crypto.h"
26 #include "ccp_pci.h"
27 #include "ccp_pmd_private.h"
28
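/*
 * Map an xform chain onto a CCP command order: plain cipher, plain auth,
 * cipher followed by auth, auth followed by cipher, or a combined AEAD op.
 */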
29 static enum ccp_cmd_order
30 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
31 {
32         enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
33
34         if (xform == NULL)
35                 return res;
36         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
37                 if (xform->next == NULL)
38                         return CCP_CMD_AUTH;
39                 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
40                         return CCP_CMD_HASH_CIPHER;
41         }
42         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
43                 if (xform->next == NULL)
44                         return CCP_CMD_CIPHER;
45                 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
46                         return CCP_CMD_CIPHER_HASH;
47         }
48         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
49                 return CCP_CMD_COMBINED;
50         return res;
51 }
52
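/*
 * CMAC subkey derivation per NIST SP 800-38B: L = AES-ENC(key, 0^128),
 * K1 = L << 1 (XOR Rb if msb(L) is set), K2 = K1 << 1 (XOR Rb if msb(K1)
 * is set); Rb is 0x87 for a 16-byte block and 0x1b for an 8-byte block.
 */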
53 /* prepare temporary keys K1 and K2 */
54 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
55 {
56         int i;
57         /* Shift block to left, including carry */
58         for (i = 0; i < bl; i++) {
59                 k[i] = l[i] << 1;
60                 if (i < bl - 1 && l[i + 1] & 0x80)
61                         k[i] |= 1;
62         }
63         /* If MSB set fixup with R */
64         if (l[0] & 0x80)
65                 k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
66 }
67
68 /* subkeys K1 and K2 generation for CMAC */
69 static int
70 generate_cmac_subkeys(struct ccp_session *sess)
71 {
72         const EVP_CIPHER *algo;
73         EVP_CIPHER_CTX *ctx;
74         unsigned char *ccp_ctx;
75         size_t i;
76         int dstlen, totlen;
77         unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
78         unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
79         unsigned char k1[AES_BLOCK_SIZE] = {0};
80         unsigned char k2[AES_BLOCK_SIZE] = {0};
81
82         if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
83                 algo =  EVP_aes_128_cbc();
84         else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
85                 algo =  EVP_aes_192_cbc();
86         else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
87                 algo =  EVP_aes_256_cbc();
88         else {
89                 CCP_LOG_ERR("Invalid CMAC type length");
90                 return -1;
91         }
92
93         ctx = EVP_CIPHER_CTX_new();
94         if (!ctx) {
95                 CCP_LOG_ERR("ctx creation failed");
96                 return -1;
97         }
98         if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
99                             (unsigned char *)zero_iv) <= 0)
100                 goto key_generate_err;
101         if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
102                 goto key_generate_err;
103         if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
104                               AES_BLOCK_SIZE) <= 0)
105                 goto key_generate_err;
106         if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
107                 goto key_generate_err;
108
109         memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
110
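        /*
         * K1 goes into the first CCP_SB_BYTES of pre_compute and K2 into the
         * second, each stored byte-reversed (the CCP consumes SB key/context
         * data in reversed byte order, like key_ccp elsewhere in this file).
         */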
111         ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
112         prepare_key(k1, dst, AES_BLOCK_SIZE);
113         for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
114                 *ccp_ctx = k1[i];
115
116         ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
117                                    (2 * CCP_SB_BYTES) - 1);
118         prepare_key(k2, k1, AES_BLOCK_SIZE);
119         for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
120                 *ccp_ctx = k2[i];
121
122         EVP_CIPHER_CTX_free(ctx);
123
124         return 0;
125
126 key_generate_err:
127         CCP_LOG_ERR("CMAC Init failed");
        EVP_CIPHER_CTX_free(ctx);
128         return -1;
129 }
130
131 /* configure session */
132 static int
133 ccp_configure_session_cipher(struct ccp_session *sess,
134                              const struct rte_crypto_sym_xform *xform)
135 {
136         const struct rte_crypto_cipher_xform *cipher_xform = NULL;
137         size_t i, j, x;
138
139         cipher_xform = &xform->cipher;
140
141         /* set cipher direction */
142         if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
143                 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
144         else
145                 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
146
147         /* set cipher key */
148         sess->cipher.key_length = cipher_xform->key.length;
149         rte_memcpy(sess->cipher.key, cipher_xform->key.data,
150                    cipher_xform->key.length);
151
152         /* set iv parameters */
153         sess->iv.offset = cipher_xform->iv.offset;
154         sess->iv.length = cipher_xform->iv.length;
155
156         switch (cipher_xform->algo) {
157         case RTE_CRYPTO_CIPHER_AES_CTR:
158                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
159                 sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
160                 sess->cipher.engine = CCP_ENGINE_AES;
161                 break;
162         case RTE_CRYPTO_CIPHER_AES_ECB:
163                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
164                 sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
165                 sess->cipher.engine = CCP_ENGINE_AES;
166                 break;
167         case RTE_CRYPTO_CIPHER_AES_CBC:
168                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
169                 sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
170                 sess->cipher.engine = CCP_ENGINE_AES;
171                 break;
172         case RTE_CRYPTO_CIPHER_3DES_CBC:
173                 sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
174                 sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
175                 sess->cipher.engine = CCP_ENGINE_3DES;
176                 break;
177         default:
178                 CCP_LOG_ERR("Unsupported cipher algo");
179                 return -1;
180         }
181
182
183         switch (sess->cipher.engine) {
184         case CCP_ENGINE_AES:
185                 if (sess->cipher.key_length == 16)
186                         sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
187                 else if (sess->cipher.key_length == 24)
188                         sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
189                 else if (sess->cipher.key_length == 32)
190                         sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
191                 else {
192                         CCP_LOG_ERR("Invalid cipher key length");
193                         return -1;
194                 }
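                /*
                 * Mirror the key into key_ccp in reversed byte order, the
                 * order in which the CCP engine fetches it; the auth and
                 * AEAD keys below are mirrored the same way.
                 */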
195                 for (i = 0; i < sess->cipher.key_length ; i++)
196                         sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
197                                 sess->cipher.key[i];
198                 break;
199         case CCP_ENGINE_3DES:
200                 if (sess->cipher.key_length == 16)
201                         sess->cipher.ut.des_type = CCP_DES_TYPE_128;
202                 else if (sess->cipher.key_length == 24)
203                         sess->cipher.ut.des_type = CCP_DES_TYPE_192;
204                 else {
205                         CCP_LOG_ERR("Invalid cipher key length");
206                         return -1;
207                 }
208                 for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
209                         for (i = 0; i < 8; i++)
210                                 sess->cipher.key_ccp[(8 + x) - i - 1] =
211                                         sess->cipher.key[i + x];
212                 break;
213         default:
214                 CCP_LOG_ERR("Invalid CCP Engine");
215                 return -ENOTSUP;
216         }
217         sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
218         sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
219         return 0;
220 }
221
222 static int
223 ccp_configure_session_auth(struct ccp_session *sess,
224                            const struct rte_crypto_sym_xform *xform)
225 {
226         const struct rte_crypto_auth_xform *auth_xform = NULL;
227         size_t i;
228
229         auth_xform = &xform->auth;
230
231         sess->auth.digest_length = auth_xform->digest_length;
232         if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
233                 sess->auth.op = CCP_AUTH_OP_GENERATE;
234         else
235                 sess->auth.op = CCP_AUTH_OP_VERIFY;
236         switch (auth_xform->algo) {
237         case RTE_CRYPTO_AUTH_AES_CMAC:
238                 sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
239                 sess->auth.engine = CCP_ENGINE_AES;
240                 sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
241                 sess->auth.key_length = auth_xform->key.length;
242                 /**< padding and hash result */
243                 sess->auth.ctx_len = CCP_SB_BYTES << 1;
244                 sess->auth.offset = AES_BLOCK_SIZE;
245                 sess->auth.block_size = AES_BLOCK_SIZE;
246                 if (sess->auth.key_length == 16)
247                         sess->auth.ut.aes_type = CCP_AES_TYPE_128;
248                 else if (sess->auth.key_length == 24)
249                         sess->auth.ut.aes_type = CCP_AES_TYPE_192;
250                 else if (sess->auth.key_length == 32)
251                         sess->auth.ut.aes_type = CCP_AES_TYPE_256;
252                 else {
253                         CCP_LOG_ERR("Invalid CMAC key length");
254                         return -1;
255                 }
256                 rte_memcpy(sess->auth.key, auth_xform->key.data,
257                            sess->auth.key_length);
258                 for (i = 0; i < sess->auth.key_length; i++)
259                         sess->auth.key_ccp[sess->auth.key_length - i - 1] =
260                                 sess->auth.key[i];
261                 if (generate_cmac_subkeys(sess))
262                         return -1;
263                 break;
264         default:
265                 CCP_LOG_ERR("Unsupported hash algo");
266                 return -ENOTSUP;
267         }
268         return 0;
269 }
270
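/*
 * Combined (AEAD) sessions land here. A minimal sketch of the application
 * side xform that selects this path, with illustrative key/IV values only:
 *
 *     struct rte_crypto_sym_xform xform = {
 *             .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *             .aead = {
 *                     .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *                     .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *                     .key = { .data = key, .length = 16 },
 *                     .iv = { .offset = IV_OFFSET, .length = 12 },
 *                     .aad_length = 16,
 *                     .digest_length = 16,
 *             },
 *     };
 */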
271 static int
272 ccp_configure_session_aead(struct ccp_session *sess,
273                            const struct rte_crypto_sym_xform *xform)
274 {
275         const struct rte_crypto_aead_xform *aead_xform = NULL;
276         size_t i;
277
278         aead_xform = &xform->aead;
279
280         sess->cipher.key_length = aead_xform->key.length;
281         rte_memcpy(sess->cipher.key, aead_xform->key.data,
282                    aead_xform->key.length);
283
284         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
285                 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
286                 sess->auth.op = CCP_AUTH_OP_GENERATE;
287         } else {
288                 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
289                 sess->auth.op = CCP_AUTH_OP_VERIFY;
290         }
291         sess->aead_algo = aead_xform->algo;
292         sess->auth.aad_length = aead_xform->aad_length;
293         sess->auth.digest_length = aead_xform->digest_length;
294
295         /* set iv parameters */
296         sess->iv.offset = aead_xform->iv.offset;
297         sess->iv.length = aead_xform->iv.length;
298
299         switch (aead_xform->algo) {
300         case RTE_CRYPTO_AEAD_AES_GCM:
301                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
302                 sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
303                 sess->cipher.engine = CCP_ENGINE_AES;
304                 if (sess->cipher.key_length == 16)
305                         sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
306                 else if (sess->cipher.key_length == 24)
307                         sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
308                 else if (sess->cipher.key_length == 32)
309                         sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
310                 else {
311                         CCP_LOG_ERR("Invalid aead key length");
312                         return -1;
313                 }
314                 for (i = 0; i < sess->cipher.key_length; i++)
315                         sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
316                                 sess->cipher.key[i];
317                 sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
318                 sess->auth.engine = CCP_ENGINE_AES;
319                 sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
320                 sess->auth.ctx_len = CCP_SB_BYTES;
321                 sess->auth.offset = 0;
322                 sess->auth.block_size = AES_BLOCK_SIZE;
323                 sess->cmd_id = CCP_CMD_COMBINED;
324                 break;
325         default:
326                 CCP_LOG_ERR("Unsupported aead algo");
327                 return -ENOTSUP;
328         }
329         sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
330         sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
331         return 0;
332 }
333
334 int
335 ccp_set_session_parameters(struct ccp_session *sess,
336                            const struct rte_crypto_sym_xform *xform)
337 {
338         const struct rte_crypto_sym_xform *cipher_xform = NULL;
339         const struct rte_crypto_sym_xform *auth_xform = NULL;
340         const struct rte_crypto_sym_xform *aead_xform = NULL;
341         int ret = 0;
342
343         sess->cmd_id = ccp_get_cmd_id(xform);
344
345         switch (sess->cmd_id) {
346         case CCP_CMD_CIPHER:
347                 cipher_xform = xform;
348                 break;
349         case CCP_CMD_AUTH:
350                 auth_xform = xform;
351                 break;
352         case CCP_CMD_CIPHER_HASH:
353                 cipher_xform = xform;
354                 auth_xform = xform->next;
355                 break;
356         case CCP_CMD_HASH_CIPHER:
357                 auth_xform = xform;
358                 cipher_xform = xform->next;
359                 break;
360         case CCP_CMD_COMBINED:
361                 aead_xform = xform;
362                 break;
363         default:
364                 CCP_LOG_ERR("Unsupported cmd_id");
365                 return -1;
366         }
367
368         /* Default IV length = 0 */
369         sess->iv.length = 0;
370         if (cipher_xform) {
371                 ret = ccp_configure_session_cipher(sess, cipher_xform);
372                 if (ret != 0) {
373                         CCP_LOG_ERR("Invalid/unsupported cipher parameters");
374                         return ret;
375                 }
376         }
377         if (auth_xform) {
378                 ret = ccp_configure_session_auth(sess, auth_xform);
379                 if (ret != 0) {
380                         CCP_LOG_ERR("Invalid/unsupported auth parameters");
381                         return ret;
382                 }
383         }
384         if (aead_xform) {
385                 ret = ccp_configure_session_aead(sess, aead_xform);
386                 if (ret != 0) {
387                         CCP_LOG_ERR("Invalid/unsupported aead parameters");
388                         return ret;
389                 }
390         }
391         return ret;
392 }
393
394 /* calculate CCP descriptors requirement */
395 static inline int
396 ccp_cipher_slot(struct ccp_session *session)
397 {
398         int count = 0;
399
400         switch (session->cipher.algo) {
401         case CCP_CIPHER_ALGO_AES_CBC:
402                 count = 2;
403                 /**< op + passthrough for iv */
404                 break;
405         case CCP_CIPHER_ALGO_AES_ECB:
406                 count = 1;
407                 /**< only op */
408                 break;
409         case CCP_CIPHER_ALGO_AES_CTR:
410                 count = 2;
411                 /**< op + passthrough for iv */
412                 break;
413         case CCP_CIPHER_ALGO_3DES_CBC:
414                 count = 2;
415                 /**< op + passthrough for iv */
416                 break;
417         default:
418                 CCP_LOG_ERR("Unsupported cipher algo %d",
419                             session->cipher.algo);
420         }
421         return count;
422 }
423
424 static inline int
425 ccp_auth_slot(struct ccp_session *session)
426 {
427         int count = 0;
428
429         switch (session->auth.algo) {
430         case CCP_AUTH_ALGO_AES_CMAC:
431                 count = 4;
432                 /**
433                  * 1. Passthru for k1/k2(255:128) with iv(127:0)
434                  * 2. AES-CMAC op on the block-aligned data
435                  * 3. Extra op descriptor for the padded final block
436                  * 4. Passthru to retrieve the result
437                  */
438                 break;
439         default:
440                 CCP_LOG_ERR("Unsupported auth algo %d",
441                             session->auth.algo);
442         }
443
444         return count;
445 }
446
447 static int
448 ccp_aead_slot(struct ccp_session *session)
449 {
450         int count = 0;
451
452         switch (session->aead_algo) {
453         case RTE_CRYPTO_AEAD_AES_GCM:
454                 break;
455         default:
456                 CCP_LOG_ERR("Unsupported aead algo %d",
457                             session->aead_algo);
458         }
459         switch (session->auth.algo) {
460         case CCP_AUTH_ALGO_AES_GCM:
461                 count = 5;
462                 /**
463                  * 1. Passthru iv
464                  * 2. Hash AAD
465                  * 3. GCTR
466                  * 4. Reload passthru
467                  * 5. Hash Final tag
468                  */
469                 break;
470         default:
471                 CCP_LOG_ERR("Unsupported combined auth ALGO %d",
472                             session->auth.algo);
473         }
474         return count;
475 }
476
477 int
478 ccp_compute_slot_count(struct ccp_session *session)
479 {
480         int count = 0;
481
482         switch (session->cmd_id) {
483         case CCP_CMD_CIPHER:
484                 count = ccp_cipher_slot(session);
485                 break;
486         case CCP_CMD_AUTH:
487                 count = ccp_auth_slot(session);
488                 break;
489         case CCP_CMD_CIPHER_HASH:
490         case CCP_CMD_HASH_CIPHER:
491                 count = ccp_cipher_slot(session);
492                 count += ccp_auth_slot(session);
493                 break;
494         case CCP_CMD_COMBINED:
495                 count = ccp_aead_slot(session);
496                 break;
497         default:
498                 CCP_LOG_ERR("Unsupported cmd_id");
499
500         }
501
502         return count;
503 }
504
505 static void
506 ccp_perform_passthru(struct ccp_passthru *pst,
507                      struct ccp_queue *cmd_q)
508 {
509         struct ccp_desc *desc;
510         union ccp_function function;
511
512         desc = &cmd_q->qbase_desc[cmd_q->qidx];
513
514         CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
515
516         CCP_CMD_SOC(desc) = 0;
517         CCP_CMD_IOC(desc) = 0;
518         CCP_CMD_INIT(desc) = 0;
519         CCP_CMD_EOM(desc) = 0;
520         CCP_CMD_PROT(desc) = 0;
521
522         function.raw = 0;
523         CCP_PT_BYTESWAP(&function) = pst->byte_swap;
524         CCP_PT_BITWISE(&function) = pst->bit_mod;
525         CCP_CMD_FUNCTION(desc) = function.raw;
526
527         CCP_CMD_LEN(desc) = pst->len;
528
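        /*
         * dir != 0: copy from system memory into an SB (LSB) slot;
         * dir == 0: copy from an SB slot back out to system memory.
         */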
529         if (pst->dir) {
530                 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
531                 CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
532                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
533
534                 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
535                 CCP_CMD_DST_HI(desc) = 0;
536                 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
537
538                 if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
539                         CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
540         } else {
541
542                 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
543                 CCP_CMD_SRC_HI(desc) = 0;
544                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
545
546                 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
547                 CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
548                 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
549         }
550
551         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
552 }
553
554 static int
555 ccp_perform_aes_cmac(struct rte_crypto_op *op,
556                      struct ccp_queue *cmd_q)
557 {
558         struct ccp_session *session;
559         union ccp_function function;
560         struct ccp_passthru pst;
561         struct ccp_desc *desc;
562         uint32_t tail;
563         uint8_t *src_tb, *append_ptr, *ctx_addr;
564         phys_addr_t src_addr, dest_addr, key_addr;
565         int length, non_align_len;
566
567         session = (struct ccp_session *)get_session_private_data(
568                                          op->sym->session,
569                                         ccp_cryptodev_driver_id);
570         key_addr = rte_mem_virt2phy(session->auth.key_ccp);
571
572         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
573                                               op->sym->auth.data.offset);
574         append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
575                                                 session->auth.ctx_len);
        if (unlikely(append_ptr == NULL)) {
                CCP_LOG_ERR("CCP MBUF append failed");
                return -1;
        }
576         dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
577
578         function.raw = 0;
579         CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
580         CCP_AES_MODE(&function) = session->auth.um.aes_mode;
581         CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
582
583         if (op->sym->auth.data.length % session->auth.block_size == 0) {
584
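                /*
                 * Block-aligned data: load a zero IV (127:0) together with
                 * K1 (255:128) from pre_compute into the SB, then run a
                 * single AES-CMAC descriptor over the whole region.
                 */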
585                 ctx_addr = session->auth.pre_compute;
586                 memset(ctx_addr, 0, AES_BLOCK_SIZE);
587                 pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
588                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
589                 pst.len = CCP_SB_BYTES;
590                 pst.dir = 1;
591                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
592                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
593                 ccp_perform_passthru(&pst, cmd_q);
594
595                 desc = &cmd_q->qbase_desc[cmd_q->qidx];
596                 memset(desc, 0, Q_DESC_SIZE);
597
598                 /* prepare desc for aes-cmac command */
599                 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
600                 CCP_CMD_EOM(desc) = 1;
601                 CCP_CMD_FUNCTION(desc) = function.raw;
602
603                 CCP_CMD_LEN(desc) = op->sym->auth.data.length;
604                 CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
605                 CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
606                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
607
608                 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
609                 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
610                 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
611                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
612
613                 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
614
615                 rte_wmb();
616
617                 tail =
618                 (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
619                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
620                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
621                               cmd_q->qcontrol | CMD_Q_RUN);
622         } else {
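                /*
                 * Data is not block aligned: load K2 (255:128) with a zero
                 * IV (127:0), run one descriptor over the aligned portion,
                 * then a second over the final block, padded in the appended
                 * area with CMAC_PAD_VALUE followed by zeroes.
                 */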
623                 ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
624                 memset(ctx_addr, 0, AES_BLOCK_SIZE);
625                 pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
626                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
627                 pst.len = CCP_SB_BYTES;
628                 pst.dir = 1;
629                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
630                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
631                 ccp_perform_passthru(&pst, cmd_q);
632
633                 length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
634                 length *= AES_BLOCK_SIZE;
635                 non_align_len = op->sym->auth.data.length - length;
636                 /* prepare desc for aes-cmac command */
637                 /*Command 1*/
638                 desc = &cmd_q->qbase_desc[cmd_q->qidx];
639                 memset(desc, 0, Q_DESC_SIZE);
640
641                 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
642                 CCP_CMD_INIT(desc) = 1;
643                 CCP_CMD_FUNCTION(desc) = function.raw;
644
645                 CCP_CMD_LEN(desc) = length;
646                 CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
647                 CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
648                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
649
650                 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
651                 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
652                 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
653                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
654
655                 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
656
657                 /*Command 2*/
658                 append_ptr = append_ptr + CCP_SB_BYTES;
659                 memset(append_ptr, 0, AES_BLOCK_SIZE);
660                 src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
661                                                  uint8_t *,
662                                                  op->sym->auth.data.offset +
663                                                  length);
664                 rte_memcpy(append_ptr, src_tb, non_align_len);
665                 append_ptr[non_align_len] = CMAC_PAD_VALUE;
666
667                 desc = &cmd_q->qbase_desc[cmd_q->qidx];
668                 memset(desc, 0, Q_DESC_SIZE);
669
670                 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
671                 CCP_CMD_EOM(desc) = 1;
672                 CCP_CMD_FUNCTION(desc) = function.raw;
673                 CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
674
675                 CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
676                 CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
677                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
678
679                 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
680                 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
681                 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
682                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
683
684                 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
685
686                 rte_wmb();
687                 tail =
688                 (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
689                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
690                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
691                               cmd_q->qcontrol | CMD_Q_RUN);
692         }
693         /* Retrieve result */
694         pst.dest_addr = dest_addr;
695         pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
696         pst.len = CCP_SB_BYTES;
697         pst.dir = 0;
698         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
699         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
700         ccp_perform_passthru(&pst, cmd_q);
701
702         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
703         return 0;
704 }
705
706 static int
707 ccp_perform_aes(struct rte_crypto_op *op,
708                 struct ccp_queue *cmd_q,
709                 struct ccp_batch_info *b_info)
710 {
711         struct ccp_session *session;
712         union ccp_function function;
713         uint8_t *lsb_buf;
714         struct ccp_passthru pst = {0};
715         struct ccp_desc *desc;
716         phys_addr_t src_addr, dest_addr, key_addr;
717         uint8_t *iv;
718
719         session = (struct ccp_session *)get_session_private_data(
720                                          op->sym->session,
721                                         ccp_cryptodev_driver_id);
722         function.raw = 0;
723
724         iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
725         if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
726                 if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
727                         rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
728                                    iv, session->iv.length);
729                         pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
730                         CCP_AES_SIZE(&function) = 0x1F;
731                 } else {
732                         lsb_buf =
733                         &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
734                         rte_memcpy(lsb_buf +
735                                    (CCP_SB_BYTES - session->iv.length),
736                                    iv, session->iv.length);
737                         pst.src_addr = b_info->lsb_buf_phys +
738                                 (b_info->lsb_buf_idx * CCP_SB_BYTES);
739                         b_info->lsb_buf_idx++;
740                 }
741
742                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
743                 pst.len = CCP_SB_BYTES;
744                 pst.dir = 1;
745                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
746                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
747                 ccp_perform_passthru(&pst, cmd_q);
748         }
749
750         desc = &cmd_q->qbase_desc[cmd_q->qidx];
751
752         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
753                                               op->sym->cipher.data.offset);
754         if (likely(op->sym->m_dst != NULL))
755                 dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
756                                                 op->sym->cipher.data.offset);
757         else
758                 dest_addr = src_addr;
759         key_addr = session->cipher.key_phys;
760
761         /* prepare desc for aes command */
762         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
763         CCP_CMD_INIT(desc) = 1;
764         CCP_CMD_EOM(desc) = 1;
765
766         CCP_AES_ENCRYPT(&function) = session->cipher.dir;
767         CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
768         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
769         CCP_CMD_FUNCTION(desc) = function.raw;
770
771         CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
772
773         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
774         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
775         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
776
777         CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
778         CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
779         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
780
781         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
782         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
783         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
784
785         if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
786                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
787
788         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
789         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
790         return 0;
791 }
792
793 static int
794 ccp_perform_3des(struct rte_crypto_op *op,
795                 struct ccp_queue *cmd_q,
796                 struct ccp_batch_info *b_info)
797 {
798         struct ccp_session *session;
799         union ccp_function function;
800         unsigned char *lsb_buf;
801         struct ccp_passthru pst;
802         struct ccp_desc *desc;
803         uint32_t tail;
804         uint8_t *iv;
805         phys_addr_t src_addr, dest_addr, key_addr;
806
807         session = (struct ccp_session *)get_session_private_data(
808                                          op->sym->session,
809                                         ccp_cryptodev_driver_id);
810
811         iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
812         switch (session->cipher.um.des_mode) {
813         case CCP_DES_MODE_CBC:
814                 lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
815                 b_info->lsb_buf_idx++;
816
817                 rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
818                            iv, session->iv.length);
819
820                 pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
821                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
822                 pst.len = CCP_SB_BYTES;
823                 pst.dir = 1;
824                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
825                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
826                 ccp_perform_passthru(&pst, cmd_q);
827                 break;
828         case CCP_DES_MODE_CFB:
829         case CCP_DES_MODE_ECB:
830                 CCP_LOG_ERR("Unsupported DES cipher mode");
831                 return -ENOTSUP;
832         }
833
834         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
835                                               op->sym->cipher.data.offset);
836         if (unlikely(op->sym->m_dst != NULL))
837                 dest_addr =
838                         rte_pktmbuf_mtophys_offset(op->sym->m_dst,
839                                                    op->sym->cipher.data.offset);
840         else
841                 dest_addr = src_addr;
842
843         key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
844
845         desc = &cmd_q->qbase_desc[cmd_q->qidx];
846
847         memset(desc, 0, Q_DESC_SIZE);
848
849         /* prepare desc for des command */
850         CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
851
852         CCP_CMD_SOC(desc) = 0;
853         CCP_CMD_IOC(desc) = 0;
854         CCP_CMD_INIT(desc) = 1;
855         CCP_CMD_EOM(desc) = 1;
856         CCP_CMD_PROT(desc) = 0;
857
858         function.raw = 0;
859         CCP_DES_ENCRYPT(&function) = session->cipher.dir;
860         CCP_DES_MODE(&function) = session->cipher.um.des_mode;
861         CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
862         CCP_CMD_FUNCTION(desc) = function.raw;
863
864         CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
865
866         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
867         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
868         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
869
870         CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
871         CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
872         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
873
874         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
875         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
876         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
877
878         if (session->cipher.um.des_mode)
879                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
880
881         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
882
883         rte_wmb();
884
885         /* Write the new tail address back to the queue register */
886         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
887         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
888         /* Turn the queue back on using our cached control register */
889         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
890                       cmd_q->qcontrol | CMD_Q_RUN);
891
892         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
893         return 0;
894 }
895
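/*
 * AES-GCM is issued as five descriptors, matching ccp_aead_slot():
 * 1. Passthru to load the counter/IV into the SB.
 * 2. GHASH over the AAD (the GHASH sub-mode is carried in the function's
 *    encrypt field).
 * 3. GCTR over the plain/cipher text.
 * 4. Passthru to reload the IV.
 * 5. GHASH-final over the (len(AAD) || len(text)) block to produce the tag.
 */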
896 static int
897 ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
898 {
899         struct ccp_session *session;
900         union ccp_function function;
901         uint8_t *iv;
902         struct ccp_passthru pst;
903         struct ccp_desc *desc;
904         uint32_t tail;
905         uint64_t *temp;
906         phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
907         phys_addr_t digest_dest_addr;
908         int length, non_align_len;
909
910         session = (struct ccp_session *)get_session_private_data(
911                                          op->sym->session,
912                                          ccp_cryptodev_driver_id);
913         iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
914         key_addr = session->cipher.key_phys;
915
916         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
917                                               op->sym->aead.data.offset);
918         if (unlikely(op->sym->m_dst != NULL))
919                 dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
920                                                 op->sym->aead.data.offset);
921         else
922                 dest_addr = src_addr;
923         if (unlikely(rte_pktmbuf_append(op->sym->m_src,
                                        session->auth.ctx_len) == NULL)) {
                CCP_LOG_ERR("CCP MBUF append failed");
                return -1;
        }
924         digest_dest_addr = op->sym->aead.digest.phys_addr;
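        /*
         * Build the final GHASH input right after the 16-byte tag area:
         * the AAD and text lengths in bits, big endian, as required for
         * the last GHASH block.
         */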
925         temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
926         *temp++ = rte_bswap64(session->auth.aad_length << 3);
927         *temp = rte_bswap64(op->sym->aead.data.length << 3);
928
929         non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
930         length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
931
932         aad_addr = op->sym->aead.aad.phys_addr;
933
934         /* CMD1 IV Passthru */
935         rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
936                    session->iv.length);
937         pst.src_addr = session->cipher.nonce_phys;
938         pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
939         pst.len = CCP_SB_BYTES;
940         pst.dir = 1;
941         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
942         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
943         ccp_perform_passthru(&pst, cmd_q);
944
945         /* CMD2 GHASH-AAD */
946         function.raw = 0;
947         CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
948         CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
949         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
950
951         desc = &cmd_q->qbase_desc[cmd_q->qidx];
952         memset(desc, 0, Q_DESC_SIZE);
953
954         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
955         CCP_CMD_INIT(desc) = 1;
956         CCP_CMD_FUNCTION(desc) = function.raw;
957
958         CCP_CMD_LEN(desc) = session->auth.aad_length;
959
960         CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
961         CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
962         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
963
964         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
965         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
966         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
967
968         CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
969
970         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
971         rte_wmb();
972
973         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
974         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
975         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
976                       cmd_q->qcontrol | CMD_Q_RUN);
977
978         /* CMD3 : GCTR Plain text */
979         function.raw = 0;
980         CCP_AES_ENCRYPT(&function) = session->cipher.dir;
981         CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
982         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
983         if (non_align_len == 0)
984                 CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
985         else
986                 CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
987
988
989         desc = &cmd_q->qbase_desc[cmd_q->qidx];
990         memset(desc, 0, Q_DESC_SIZE);
991
992         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
993         CCP_CMD_EOM(desc) = 1;
994         CCP_CMD_FUNCTION(desc) = function.raw;
995
996         CCP_CMD_LEN(desc) = length;
997
998         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
999         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1000         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1001
1002         CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
1003         CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
1004         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1005
1006         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1007         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1008         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1009
1010         CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1011
1012         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1013         rte_wmb();
1014
1015         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1016         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1017         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1018                       cmd_q->qcontrol | CMD_Q_RUN);
1019
1020         /* CMD4 : PT to copy IV */
1021         pst.src_addr = session->cipher.nonce_phys;
1022         pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
1023         pst.len = AES_BLOCK_SIZE;
1024         pst.dir = 1;
1025         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1026         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1027         ccp_perform_passthru(&pst, cmd_q);
1028
1029         /* CMD5 : GHASH-Final */
1030         function.raw = 0;
1031         CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
1032         CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
1033         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
1034
1035         desc = &cmd_q->qbase_desc[cmd_q->qidx];
1036         memset(desc, 0, Q_DESC_SIZE);
1037
1038         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1039         CCP_CMD_FUNCTION(desc) = function.raw;
1040         /* Last block (AAD_len || PT_len) */
1041         CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
1042
1043         CCP_CMD_SRC_LO(desc) = ((uint32_t)(digest_dest_addr + AES_BLOCK_SIZE));
1044         CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
1045         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1046
1047         CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
1048         CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
1049         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1050
1051         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1052         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1053         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1054
1055         CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1056
1057         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1058         rte_wmb();
1059
1060         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1061         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1062         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1063                       cmd_q->qcontrol | CMD_Q_RUN);
1064
1065         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1066         return 0;
1067 }
1068
1069 static inline int
1070 ccp_crypto_cipher(struct rte_crypto_op *op,
1071                   struct ccp_queue *cmd_q,
1072                   struct ccp_batch_info *b_info)
1073 {
1074         int result = 0;
1075         struct ccp_session *session;
1076
1077         session = (struct ccp_session *)get_session_private_data(
1078                                          op->sym->session,
1079                                          ccp_cryptodev_driver_id);
1080
1081         switch (session->cipher.algo) {
1082         case CCP_CIPHER_ALGO_AES_CBC:
1083                 result = ccp_perform_aes(op, cmd_q, b_info);
1084                 b_info->desccnt += 2;
1085                 break;
1086         case CCP_CIPHER_ALGO_AES_CTR:
1087                 result = ccp_perform_aes(op, cmd_q, b_info);
1088                 b_info->desccnt += 2;
1089                 break;
1090         case CCP_CIPHER_ALGO_AES_ECB:
1091                 result = ccp_perform_aes(op, cmd_q, b_info);
1092                 b_info->desccnt += 1;
1093                 break;
1094         case CCP_CIPHER_ALGO_3DES_CBC:
1095                 result = ccp_perform_3des(op, cmd_q, b_info);
1096                 b_info->desccnt += 2;
1097                 break;
1098         default:
1099                 CCP_LOG_ERR("Unsupported cipher algo %d",
1100                             session->cipher.algo);
1101                 return -ENOTSUP;
1102         }
1103         return result;
1104 }
1105
1106 static inline int
1107 ccp_crypto_auth(struct rte_crypto_op *op,
1108                 struct ccp_queue *cmd_q,
1109                 struct ccp_batch_info *b_info)
1110 {
1111
1112         int result = 0;
1113         struct ccp_session *session;
1114
1115         session = (struct ccp_session *)get_session_private_data(
1116                                          op->sym->session,
1117                                         ccp_cryptodev_driver_id);
1118
1119         switch (session->auth.algo) {
1120         case CCP_AUTH_ALGO_AES_CMAC:
1121                 result = ccp_perform_aes_cmac(op, cmd_q);
1122                 b_info->desccnt += 4;
1123                 break;
1124         default:
1125                 CCP_LOG_ERR("Unsupported auth algo %d",
1126                             session->auth.algo);
1127                 return -ENOTSUP;
1128         }
1129
1130         return result;
1131 }
1132
1133 static inline int
1134 ccp_crypto_aead(struct rte_crypto_op *op,
1135                 struct ccp_queue *cmd_q,
1136                 struct ccp_batch_info *b_info)
1137 {
1138         int result = 0;
1139         struct ccp_session *session;
1140
1141         session = (struct ccp_session *)get_session_private_data(
1142                                         op->sym->session,
1143                                         ccp_cryptodev_driver_id);
1144
1145         switch (session->auth.algo) {
1146         case CCP_AUTH_ALGO_AES_GCM:
1147                 if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
1148                         CCP_LOG_ERR("Incorrect chain order");
1149                         return -1;
1150                 }
1151                 result = ccp_perform_aes_gcm(op, cmd_q);
1152                 b_info->desccnt += 5;
1153                 break;
1154         default:
1155                 CCP_LOG_ERR("Unsupported aead algo %d",
1156                             session->aead_algo);
1157                 return -ENOTSUP;
1158         }
1159         return result;
1160 }
1161
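/*
 * Build descriptors for up to nb_ops ops on cmd_q, record the batch in
 * b_info for the dequeue side, then ring the queue doorbell once for the
 * whole batch.
 */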
1162 int
1163 process_ops_to_enqueue(const struct ccp_qp *qp,
1164                        struct rte_crypto_op **op,
1165                        struct ccp_queue *cmd_q,
1166                        uint16_t nb_ops,
1167                        int slots_req)
1168 {
1169         int i, result = 0;
1170         struct ccp_batch_info *b_info;
1171         struct ccp_session *session;
1172
1173         if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
1174                 CCP_LOG_ERR("batch info allocation failed");
1175                 return 0;
1176         }
1177         /* populate batch info necessary for dequeue */
1178         b_info->op_idx = 0;
1179         b_info->lsb_buf_idx = 0;
1180         b_info->desccnt = 0;
1181         b_info->cmd_q = cmd_q;
1182         b_info->lsb_buf_phys =
1183                 (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
1184         rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
1185
1186         b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
1187                                          Q_DESC_SIZE);
1188         for (i = 0; i < nb_ops; i++) {
1189                 session = (struct ccp_session *)get_session_private_data(
1190                                                  op[i]->sym->session,
1191                                                  ccp_cryptodev_driver_id);
1192                 switch (session->cmd_id) {
1193                 case CCP_CMD_CIPHER:
1194                         result = ccp_crypto_cipher(op[i], cmd_q, b_info);
1195                         break;
1196                 case CCP_CMD_AUTH:
1197                         result = ccp_crypto_auth(op[i], cmd_q, b_info);
1198                         break;
1199                 case CCP_CMD_CIPHER_HASH:
1200                         result = ccp_crypto_cipher(op[i], cmd_q, b_info);
1201                         if (result)
1202                                 break;
1203                         result = ccp_crypto_auth(op[i], cmd_q, b_info);
1204                         break;
1205                 case CCP_CMD_HASH_CIPHER:
1206                         result = ccp_crypto_auth(op[i], cmd_q, b_info);
1207                         if (result)
1208                                 break;
1209                         result = ccp_crypto_cipher(op[i], cmd_q, b_info);
1210                         break;
1211                 case CCP_CMD_COMBINED:
1212                         result = ccp_crypto_aead(op[i], cmd_q, b_info);
1213                         break;
1214                 default:
1215                         CCP_LOG_ERR("Unsupported cmd_id");
1216                         result = -1;
1217                 }
1218                 if (unlikely(result < 0)) {
1219                         rte_atomic64_add(&b_info->cmd_q->free_slots,
1220                                          (slots_req - b_info->desccnt));
1221                         break;
1222                 }
1223                 b_info->op[i] = op[i];
1224         }
1225
1226         b_info->opcnt = i;
1227         b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
1228                                          Q_DESC_SIZE);
1229
1230         rte_wmb();
1231         /* Write the new tail address back to the queue register */
1232         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
1233                               b_info->tail_offset);
1234         /* Turn the queue back on using our cached control register */
1235         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1236                               cmd_q->qcontrol | CMD_Q_RUN);
1237
1238         rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
1239
1240         return i;
1241 }
1242
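/*
 * The CCP wrote the digest into the context area appended to the last mbuf
 * segment; compare it against the op's digest on verify, or copy it out on
 * generate, then trim the appended area from the mbuf.
 */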
1243 static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
1244 {
1245         struct ccp_session *session;
1246         uint8_t *digest_data, *addr;
1247         struct rte_mbuf *m_last;
1248         int offset, digest_offset;
1249         uint8_t digest_le[64];
1250
1251         session = (struct ccp_session *)get_session_private_data(
1252                                          op->sym->session,
1253                                         ccp_cryptodev_driver_id);
1254
1255         if (session->cmd_id == CCP_CMD_COMBINED) {
1256                 digest_data = op->sym->aead.digest.data;
1257                 digest_offset = op->sym->aead.data.offset +
1258                                         op->sym->aead.data.length;
1259         } else {
1260                 digest_data = op->sym->auth.digest.data;
1261                 digest_offset = op->sym->auth.data.offset +
1262                                         op->sym->auth.data.length;
1263         }
1264         m_last = rte_pktmbuf_lastseg(op->sym->m_src);
1265         addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
1266                            m_last->data_len - session->auth.ctx_len);
1267
1268         rte_mb();
1269         offset = session->auth.offset;
1270
1271         if (session->auth.engine == CCP_ENGINE_SHA)
1272                 if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
1273                     (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
1274                     (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
1275                         /* All other algorithms require byte
1276                          * swap done by host
1277                          */
1278                         unsigned int i;
1279
1280                         offset = session->auth.ctx_len -
1281                                 session->auth.offset - 1;
1282                         for (i = 0; i < session->auth.digest_length; i++)
1283                                 digest_le[i] = addr[offset - i];
1284                         offset = 0;
1285                         addr = digest_le;
1286                 }
1287
1288         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1289         if (session->auth.op == CCP_AUTH_OP_VERIFY) {
1290                 if (memcmp(addr + offset, digest_data,
1291                            session->auth.digest_length) != 0)
1292                         op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1293
1294         } else {
1295                 if (unlikely(digest_data == NULL))
1296                         digest_data = rte_pktmbuf_mtod_offset(
1297                                         op->sym->m_dst, uint8_t *,
1298                                         digest_offset);
1299                 rte_memcpy(digest_data, addr + offset,
1300                            session->auth.digest_length);
1301         }
1302         /* Trim area used for digest from mbuf. */
1303         rte_pktmbuf_trim(op->sym->m_src,
1304                          session->auth.ctx_len);
1305 }
1306
1307 static int
1308 ccp_prepare_ops(struct rte_crypto_op **op_d,
1309                 struct ccp_batch_info *b_info,
1310                 uint16_t nb_ops)
1311 {
1312         int i, min_ops;
1313         struct ccp_session *session;
1314
1315         min_ops = RTE_MIN(nb_ops, b_info->opcnt);
1316
1317         for (i = 0; i < min_ops; i++) {
1318                 op_d[i] = b_info->op[b_info->op_idx++];
1319                 session = (struct ccp_session *)get_session_private_data(
1320                                                  op_d[i]->sym->session,
1321                                                 ccp_cryptodev_driver_id);
1322                 switch (session->cmd_id) {
1323                 case CCP_CMD_CIPHER:
1324                         op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1325                         break;
1326                 case CCP_CMD_AUTH:
1327                 case CCP_CMD_CIPHER_HASH:
1328                 case CCP_CMD_HASH_CIPHER:
1329                 case CCP_CMD_COMBINED:
1330                         ccp_auth_dq_prepare(op_d[i]);
1331                         break;
1332                 default:
1333                         CCP_LOG_ERR("Unsupported cmd_id");
1334                 }
1335         }
1336
1337         b_info->opcnt -= min_ops;
1338         return min_ops;
1339 }
1340
1341 int
1342 process_ops_to_dequeue(struct ccp_qp *qp,
1343                        struct rte_crypto_op **op,
1344                        uint16_t nb_ops)
1345 {
1346         struct ccp_batch_info *b_info;
1347         uint32_t cur_head_offset;
1348
1349         if (qp->b_info != NULL) {
1350                 b_info = qp->b_info;
1351                 if (unlikely(b_info->op_idx > 0))
1352                         goto success;
1353         } else if (rte_ring_dequeue(qp->processed_pkts,
1354                                     (void **)&b_info))
1355                 return 0;
1356         cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
1357                                        CMD_Q_HEAD_LO_BASE);
1358
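        /*
         * The batch is still in flight while the hardware head pointer lies
         * inside [head_offset, tail_offset); the two branches below handle
         * the descriptor ring wrapping around.
         */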
1359         if (b_info->head_offset < b_info->tail_offset) {
1360                 if ((cur_head_offset >= b_info->head_offset) &&
1361                     (cur_head_offset < b_info->tail_offset)) {
1362                         qp->b_info = b_info;
1363                         return 0;
1364                 }
1365         } else {
1366                 if ((cur_head_offset >= b_info->head_offset) ||
1367                     (cur_head_offset < b_info->tail_offset)) {
1368                         qp->b_info = b_info;
1369                         return 0;
1370                 }
1371         }
1372
1373
1374 success:
1375         nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
1376         rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
1377         b_info->desccnt = 0;
1378         if (b_info->opcnt > 0) {
1379                 qp->b_info = b_info;
1380         } else {
1381                 rte_mempool_put(qp->batch_mp, (void *)b_info);
1382                 qp->b_info = NULL;
1383         }
1384
1385         return nb_ops;
1386 }