1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  */
4
5 #include <dirent.h>
6 #include <fcntl.h>
7 #include <stdio.h>
8 #include <string.h>
9 #include <sys/mman.h>
10 #include <sys/queue.h>
11 #include <sys/types.h>
12 #include <unistd.h>
13 #include <openssl/sha.h>
14 #include <openssl/cmac.h> /*sub key apis*/
15 #include <openssl/evp.h> /*sub key apis*/
16
17 #include <rte_hexdump.h>
18 #include <rte_memzone.h>
19 #include <rte_malloc.h>
20 #include <rte_memory.h>
21 #include <rte_spinlock.h>
22 #include <rte_string_fns.h>
23 #include <rte_cryptodev_pmd.h>
24
25 #include "ccp_dev.h"
26 #include "ccp_crypto.h"
27 #include "ccp_pci.h"
28 #include "ccp_pmd_private.h"
29
30 /* SHA initial context values */
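/*
 * Note: the words are stored in reverse order to match the byte-swapped
 * context layout the CCP SHA engine keeps in the LSB; the digest is
 * byte-swapped back (256-bit passthrough) when it is retrieved.
 */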
31 static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
32         SHA1_H4, SHA1_H3,
33         SHA1_H2, SHA1_H1,
34         SHA1_H0, 0x0U,
35         0x0U, 0x0U,
36 };
37
38 static enum ccp_cmd_order
39 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
40 {
41         enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
42
43         if (xform == NULL)
44                 return res;
45         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
46                 if (xform->next == NULL)
47                         return CCP_CMD_AUTH;
48                 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
49                         return CCP_CMD_HASH_CIPHER;
50         }
51         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
52                 if (xform->next == NULL)
53                         return CCP_CMD_CIPHER;
54                 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
55                         return CCP_CMD_CIPHER_HASH;
56         }
57         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
58                 return CCP_CMD_COMBINED;
59         return res;
60 }
61
62 /* partial hash using openssl */
63 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
64 {
65         SHA_CTX ctx;
66
67         if (!SHA1_Init(&ctx))
68                 return -EFAULT;
69         SHA1_Transform(&ctx, data_in);
70         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
71         return 0;
72 }
73
74 static int generate_partial_hash(struct ccp_session *sess)
75 {
76
77         uint8_t ipad[sess->auth.block_size];
78         uint8_t opad[sess->auth.block_size];
79         uint8_t *ipad_t, *opad_t;
80         uint32_t *hash_value_be32, hash_temp32[8];
81         int i, count;
82
83         opad_t = ipad_t = (uint8_t *)sess->auth.key;
84
85         hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
86
87         /* the auth key buffer is zero-padded to the algorithm block size by the caller */
88         for (i = 0; i < sess->auth.block_size; i++) {
89                 ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
90                 opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
91         }
92
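        /*
         * Run one SHA block transform over (key XOR ipad) and (key XOR opad)
         * and store both intermediate states, word-reversed, in pre_compute;
         * the SHA engine later resumes the HMAC from these states.
         */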
93         switch (sess->auth.algo) {
94         case CCP_AUTH_ALGO_SHA1_HMAC:
95                 count = SHA1_DIGEST_SIZE >> 2;
96
97                 if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
98                         return -1;
99                 for (i = 0; i < count; i++, hash_value_be32++)
100                         *hash_value_be32 = hash_temp32[count - 1 - i];
101
102                 hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
103                                                + sess->auth.ctx_len);
104                 if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
105                         return -1;
106                 for (i = 0; i < count; i++, hash_value_be32++)
107                         *hash_value_be32 = hash_temp32[count - 1 - i];
108                 return 0;
109         default:
110                 CCP_LOG_ERR("Invalid auth algo");
111                 return -1;
112         }
113 }
114
115 /* prepare temporary keys K1 and K2 */
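/*
 * This is the doubling step of the CMAC subkey derivation (NIST SP 800-38B):
 * shift the block left by one bit and, if the original MSB was set, XOR the
 * last byte with the field constant (0x87 for 16-byte blocks, 0x1b for
 * 8-byte blocks).
 */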
116 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
117 {
118         int i;
119         /* Shift block to left, including carry */
120         for (i = 0; i < bl; i++) {
121                 k[i] = l[i] << 1;
122                 if (i < bl - 1 && l[i + 1] & 0x80)
123                         k[i] |= 1;
124         }
125         /* If MSB set fixup with R */
126         if (l[0] & 0x80)
127                 k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
128 }
129
130 /* subkeys K1 and K2 generation for CMAC */
131 static int
132 generate_cmac_subkeys(struct ccp_session *sess)
133 {
134         const EVP_CIPHER *algo;
135         EVP_CIPHER_CTX *ctx;
136         unsigned char *ccp_ctx;
137         size_t i;
138         int dstlen, totlen;
139         unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
140         unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
141         unsigned char k1[AES_BLOCK_SIZE] = {0};
142         unsigned char k2[AES_BLOCK_SIZE] = {0};
143
144         if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
145                 algo =  EVP_aes_128_cbc();
146         else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
147                 algo =  EVP_aes_192_cbc();
148         else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
149                 algo =  EVP_aes_256_cbc();
150         else {
151                 CCP_LOG_ERR("Invalid CMAC type length");
152                 return -1;
153         }
154
155         ctx = EVP_CIPHER_CTX_new();
156         if (!ctx) {
157                 CCP_LOG_ERR("ctx creation failed");
158                 return -1;
159         }
160         if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
161                             (unsigned char *)zero_iv) <= 0)
162                 goto key_generate_err;
163         if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
164                 goto key_generate_err;
165         if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
166                               AES_BLOCK_SIZE) <= 0)
167                 goto key_generate_err;
168         if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
169                 goto key_generate_err;
170
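        /*
         * dst now holds L = AES-K(0^16) (CBC over a zero block with a zero
         * IV). Derive K1 = double(L) and K2 = double(K1) and store each of
         * them byte-reversed in the upper half of its 32-byte pre-compute
         * slot.
         */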
171         memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
172
173         ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
174         prepare_key(k1, dst, AES_BLOCK_SIZE);
175         for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
176                 *ccp_ctx = k1[i];
177
178         ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
179                                    (2 * CCP_SB_BYTES) - 1);
180         prepare_key(k2, k1, AES_BLOCK_SIZE);
181         for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
182                 *ccp_ctx = k2[i];
183
184         EVP_CIPHER_CTX_free(ctx);
185
186         return 0;
187
188 key_generate_err:
189         CCP_LOG_ERR("CMAC Init failed");
        /* also free the OpenSSL context on the error path */
        EVP_CIPHER_CTX_free(ctx);
190         return -1;
191 }
192
193 /* configure session */
194 static int
195 ccp_configure_session_cipher(struct ccp_session *sess,
196                              const struct rte_crypto_sym_xform *xform)
197 {
198         const struct rte_crypto_cipher_xform *cipher_xform = NULL;
199         size_t i, j, x;
200
201         cipher_xform = &xform->cipher;
202
203         /* set cipher direction */
204         if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
205                 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
206         else
207                 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
208
209         /* set cipher key */
210         sess->cipher.key_length = cipher_xform->key.length;
211         rte_memcpy(sess->cipher.key, cipher_xform->key.data,
212                    cipher_xform->key.length);
213
214         /* set iv parameters */
215         sess->iv.offset = cipher_xform->iv.offset;
216         sess->iv.length = cipher_xform->iv.length;
217
218         switch (cipher_xform->algo) {
219         case RTE_CRYPTO_CIPHER_AES_CTR:
220                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
221                 sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
222                 sess->cipher.engine = CCP_ENGINE_AES;
223                 break;
224         case RTE_CRYPTO_CIPHER_AES_ECB:
225                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
226                 sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
227                 sess->cipher.engine = CCP_ENGINE_AES;
228                 break;
229         case RTE_CRYPTO_CIPHER_AES_CBC:
230                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
231                 sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
232                 sess->cipher.engine = CCP_ENGINE_AES;
233                 break;
234         case RTE_CRYPTO_CIPHER_3DES_CBC:
235                 sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
236                 sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
237                 sess->cipher.engine = CCP_ENGINE_3DES;
238                 break;
239         default:
240                 CCP_LOG_ERR("Unsupported cipher algo");
241                 return -1;
242         }
243
244
245         switch (sess->cipher.engine) {
246         case CCP_ENGINE_AES:
247                 if (sess->cipher.key_length == 16)
248                         sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
249                 else if (sess->cipher.key_length == 24)
250                         sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
251                 else if (sess->cipher.key_length == 32)
252                         sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
253                 else {
254                         CCP_LOG_ERR("Invalid cipher key length");
255                         return -1;
256                 }
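                /* the CCP expects the AES key in reversed byte order */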
257                 for (i = 0; i < sess->cipher.key_length ; i++)
258                         sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
259                                 sess->cipher.key[i];
260                 break;
261         case CCP_ENGINE_3DES:
262                 if (sess->cipher.key_length == 16)
263                         sess->cipher.ut.des_type = CCP_DES_TYPE_128;
264                 else if (sess->cipher.key_length == 24)
265                         sess->cipher.ut.des_type = CCP_DES_TYPE_192;
266                 else {
267                         CCP_LOG_ERR("Invalid cipher key length");
268                         return -1;
269                 }
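                /* reverse the bytes within each 8-byte DES key word */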
270                 for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
271                         for (i = 0; i < 8; i++)
272                                 sess->cipher.key_ccp[(8 + x) - i - 1] =
273                                         sess->cipher.key[i + x];
274                 break;
275         default:
276                 CCP_LOG_ERR("Invalid CCP Engine");
277                 return -ENOTSUP;
278         }
279         sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
280         sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
281         return 0;
282 }
283
284 static int
285 ccp_configure_session_auth(struct ccp_session *sess,
286                            const struct rte_crypto_sym_xform *xform)
287 {
288         const struct rte_crypto_auth_xform *auth_xform = NULL;
289         size_t i;
290
291         auth_xform = &xform->auth;
292
293         sess->auth.digest_length = auth_xform->digest_length;
294         if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
295                 sess->auth.op = CCP_AUTH_OP_GENERATE;
296         else
297                 sess->auth.op = CCP_AUTH_OP_VERIFY;
298         switch (auth_xform->algo) {
299         case RTE_CRYPTO_AUTH_SHA1:
300                 sess->auth.engine = CCP_ENGINE_SHA;
301                 sess->auth.algo = CCP_AUTH_ALGO_SHA1;
302                 sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
303                 sess->auth.ctx = (void *)ccp_sha1_init;
304                 sess->auth.ctx_len = CCP_SB_BYTES;
305                 sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
306                 break;
307         case RTE_CRYPTO_AUTH_SHA1_HMAC:
308                 if (auth_xform->key.length > SHA1_BLOCK_SIZE)
309                         return -1;
310                 sess->auth.engine = CCP_ENGINE_SHA;
311                 sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
312                 sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
313                 sess->auth.ctx_len = CCP_SB_BYTES;
314                 sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
315                 sess->auth.block_size = SHA1_BLOCK_SIZE;
316                 sess->auth.key_length = auth_xform->key.length;
317                 memset(sess->auth.key, 0, sess->auth.block_size);
318                 memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
319                 rte_memcpy(sess->auth.key, auth_xform->key.data,
320                            auth_xform->key.length);
321                 if (generate_partial_hash(sess))
322                         return -1;
323                 break;
324         case RTE_CRYPTO_AUTH_AES_CMAC:
325                 sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
326                 sess->auth.engine = CCP_ENGINE_AES;
327                 sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
328                 sess->auth.key_length = auth_xform->key.length;
329                 /**<padding and hash result*/
330                 sess->auth.ctx_len = CCP_SB_BYTES << 1;
331                 sess->auth.offset = AES_BLOCK_SIZE;
332                 sess->auth.block_size = AES_BLOCK_SIZE;
333                 if (sess->auth.key_length == 16)
334                         sess->auth.ut.aes_type = CCP_AES_TYPE_128;
335                 else if (sess->auth.key_length == 24)
336                         sess->auth.ut.aes_type = CCP_AES_TYPE_192;
337                 else if (sess->auth.key_length == 32)
338                         sess->auth.ut.aes_type = CCP_AES_TYPE_256;
339                 else {
340                         CCP_LOG_ERR("Invalid CMAC key length");
341                         return -1;
342                 }
343                 rte_memcpy(sess->auth.key, auth_xform->key.data,
344                            sess->auth.key_length);
345                 for (i = 0; i < sess->auth.key_length; i++)
346                         sess->auth.key_ccp[sess->auth.key_length - i - 1] =
347                                 sess->auth.key[i];
348                 if (generate_cmac_subkeys(sess))
349                         return -1;
350                 break;
351         default:
352                 CCP_LOG_ERR("Unsupported hash algo");
353                 return -ENOTSUP;
354         }
355         return 0;
356 }
357
358 static int
359 ccp_configure_session_aead(struct ccp_session *sess,
360                            const struct rte_crypto_sym_xform *xform)
361 {
362         const struct rte_crypto_aead_xform *aead_xform = NULL;
363         size_t i;
364
365         aead_xform = &xform->aead;
366
367         sess->cipher.key_length = aead_xform->key.length;
368         rte_memcpy(sess->cipher.key, aead_xform->key.data,
369                    aead_xform->key.length);
370
371         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
372                 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
373                 sess->auth.op = CCP_AUTH_OP_GENERATE;
374         } else {
375                 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
376                 sess->auth.op = CCP_AUTH_OP_VERIFY;
377         }
378         sess->aead_algo = aead_xform->algo;
379         sess->auth.aad_length = aead_xform->aad_length;
380         sess->auth.digest_length = aead_xform->digest_length;
381
382         /* set iv parameters */
383         sess->iv.offset = aead_xform->iv.offset;
384         sess->iv.length = aead_xform->iv.length;
385
386         switch (aead_xform->algo) {
387         case RTE_CRYPTO_AEAD_AES_GCM:
388                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
389                 sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
390                 sess->cipher.engine = CCP_ENGINE_AES;
391                 if (sess->cipher.key_length == 16)
392                         sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
393                 else if (sess->cipher.key_length == 24)
394                         sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
395                 else if (sess->cipher.key_length == 32)
396                         sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
397                 else {
398                         CCP_LOG_ERR("Invalid aead key length");
399                         return -1;
400                 }
401                 for (i = 0; i < sess->cipher.key_length; i++)
402                         sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
403                                 sess->cipher.key[i];
404                 sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
405                 sess->auth.engine = CCP_ENGINE_AES;
406                 sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
407                 sess->auth.ctx_len = CCP_SB_BYTES;
408                 sess->auth.offset = 0;
409                 sess->auth.block_size = AES_BLOCK_SIZE;
410                 sess->cmd_id = CCP_CMD_COMBINED;
411                 break;
412         default:
413                 CCP_LOG_ERR("Unsupported aead algo");
414                 return -ENOTSUP;
415         }
416         sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
417         sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
418         return 0;
419 }
420
421 int
422 ccp_set_session_parameters(struct ccp_session *sess,
423                            const struct rte_crypto_sym_xform *xform)
424 {
425         const struct rte_crypto_sym_xform *cipher_xform = NULL;
426         const struct rte_crypto_sym_xform *auth_xform = NULL;
427         const struct rte_crypto_sym_xform *aead_xform = NULL;
428         int ret = 0;
429
430         sess->cmd_id = ccp_get_cmd_id(xform);
431
432         switch (sess->cmd_id) {
433         case CCP_CMD_CIPHER:
434                 cipher_xform = xform;
435                 break;
436         case CCP_CMD_AUTH:
437                 auth_xform = xform;
438                 break;
439         case CCP_CMD_CIPHER_HASH:
440                 cipher_xform = xform;
441                 auth_xform = xform->next;
442                 break;
443         case CCP_CMD_HASH_CIPHER:
444                 auth_xform = xform;
445                 cipher_xform = xform->next;
446                 break;
447         case CCP_CMD_COMBINED:
448                 aead_xform = xform;
449                 break;
450         default:
451                 CCP_LOG_ERR("Unsupported cmd_id");
452                 return -1;
453         }
454
455         /* Default IV length = 0 */
456         sess->iv.length = 0;
457         if (cipher_xform) {
458                 ret = ccp_configure_session_cipher(sess, cipher_xform);
459                 if (ret != 0) {
460                         CCP_LOG_ERR("Invalid/unsupported cipher parameters");
461                         return ret;
462                 }
463         }
464         if (auth_xform) {
465                 ret = ccp_configure_session_auth(sess, auth_xform);
466                 if (ret != 0) {
467                         CCP_LOG_ERR("Invalid/unsupported auth parameters");
468                         return ret;
469                 }
470         }
471         if (aead_xform) {
472                 ret = ccp_configure_session_aead(sess, aead_xform);
473                 if (ret != 0) {
474                         CCP_LOG_ERR("Invalid/unsupported aead parameters");
475                         return ret;
476                 }
477         }
478         return ret;
479 }
480
481 /* calculate CCP descriptors requirement */
482 static inline int
483 ccp_cipher_slot(struct ccp_session *session)
484 {
485         int count = 0;
486
487         switch (session->cipher.algo) {
488         case CCP_CIPHER_ALGO_AES_CBC:
489                 count = 2;
490                 /**< op + passthrough for iv */
491                 break;
492         case CCP_CIPHER_ALGO_AES_ECB:
493                 count = 1;
494                 /**<only op*/
495                 break;
496         case CCP_CIPHER_ALGO_AES_CTR:
497                 count = 2;
498                 /**< op + passthrough for iv */
499                 break;
500         case CCP_CIPHER_ALGO_3DES_CBC:
501                 count = 2;
502                 /**< op + passthrough for iv */
503                 break;
504         default:
505                 CCP_LOG_ERR("Unsupported cipher algo %d",
506                             session->cipher.algo);
507         }
508         return count;
509 }
510
511 static inline int
512 ccp_auth_slot(struct ccp_session *session)
513 {
514         int count = 0;
515
516         switch (session->auth.algo) {
517         case CCP_AUTH_ALGO_SHA1:
518                 count = 3;
519                 /**< op + lsb passthrough cpy to/from*/
520                 break;
521         case CCP_AUTH_ALGO_SHA1_HMAC:
522                 count = 6;
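                /**
                 * 1. Load precomputed ipad partial hash
                 * 2. SHA op over the message
                 * 3. Retrieve intermediate hash
                 * 4. Load precomputed opad partial hash
                 * 5. SHA op over the intermediate hash
                 * 6. Retrieve HMAC output
                 */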
523                 break;
524         case CCP_AUTH_ALGO_AES_CMAC:
525                 count = 4;
526                 /**
527                  * op
528                  * extra descriptor in padding case
529                  * (k1/k2(255:128) with iv(127:0))
530                  * Retrieve result
531                  */
532                 break;
533         default:
534                 CCP_LOG_ERR("Unsupported auth algo %d",
535                             session->auth.algo);
536         }
537
538         return count;
539 }
540
541 static int
542 ccp_aead_slot(struct ccp_session *session)
543 {
544         int count = 0;
545
546         switch (session->aead_algo) {
547         case RTE_CRYPTO_AEAD_AES_GCM:
548                 break;
549         default:
550                 CCP_LOG_ERR("Unsupported aead algo %d",
551                             session->aead_algo);
552         }
553         switch (session->auth.algo) {
554         case CCP_AUTH_ALGO_AES_GCM:
555                 count = 5;
556                 /**
557                  * 1. Passthru iv
558                  * 2. Hash AAD
559                  * 3. GCTR
560                  * 4. Reload passthru
561                  * 5. Hash Final tag
562                  */
563                 break;
564         default:
565                 CCP_LOG_ERR("Unsupported combined auth ALGO %d",
566                             session->auth.algo);
567         }
568         return count;
569 }
570
571 int
572 ccp_compute_slot_count(struct ccp_session *session)
573 {
574         int count = 0;
575
576         switch (session->cmd_id) {
577         case CCP_CMD_CIPHER:
578                 count = ccp_cipher_slot(session);
579                 break;
580         case CCP_CMD_AUTH:
581                 count = ccp_auth_slot(session);
582                 break;
583         case CCP_CMD_CIPHER_HASH:
584         case CCP_CMD_HASH_CIPHER:
585                 count = ccp_cipher_slot(session);
586                 count += ccp_auth_slot(session);
587                 break;
588         case CCP_CMD_COMBINED:
589                 count = ccp_aead_slot(session);
590                 break;
591         default:
592                 CCP_LOG_ERR("Unsupported cmd_id");
593
594         }
595
596         return count;
597 }
598
599 static void
600 ccp_perform_passthru(struct ccp_passthru *pst,
601                      struct ccp_queue *cmd_q)
602 {
603         struct ccp_desc *desc;
604         union ccp_function function;
605
606         desc = &cmd_q->qbase_desc[cmd_q->qidx];
607
608         CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
609
610         CCP_CMD_SOC(desc) = 0;
611         CCP_CMD_IOC(desc) = 0;
612         CCP_CMD_INIT(desc) = 0;
613         CCP_CMD_EOM(desc) = 0;
614         CCP_CMD_PROT(desc) = 0;
615
616         function.raw = 0;
617         CCP_PT_BYTESWAP(&function) = pst->byte_swap;
618         CCP_PT_BITWISE(&function) = pst->bit_mod;
619         CCP_CMD_FUNCTION(desc) = function.raw;
620
621         CCP_CMD_LEN(desc) = pst->len;
622
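        /*
         * dir = 1: copy from system memory into an LSB slot;
         * dir = 0: copy an LSB slot back to system memory.
         */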
623         if (pst->dir) {
624                 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
625                 CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
626                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
627
628                 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
629                 CCP_CMD_DST_HI(desc) = 0;
630                 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
631
632                 if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
633                         CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
634         } else {
635
636                 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
637                 CCP_CMD_SRC_HI(desc) = 0;
638                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
639
640                 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
641                 CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
642                 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
643         }
644
645         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
646 }
647
648 static int
649 ccp_perform_hmac(struct rte_crypto_op *op,
650                  struct ccp_queue *cmd_q)
651 {
652
653         struct ccp_session *session;
654         union ccp_function function;
655         struct ccp_desc *desc;
656         uint32_t tail;
657         phys_addr_t src_addr, dest_addr, dest_addr_t;
658         struct ccp_passthru pst;
659         uint64_t auth_msg_bits;
660         void *append_ptr;
661         uint8_t *addr;
662
663         session = (struct ccp_session *)get_session_private_data(
664                                          op->sym->session,
665                                          ccp_cryptodev_driver_id);
666         addr = session->auth.pre_compute;
667
668         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
669                                               op->sym->auth.data.offset);
670         append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
671                                                 session->auth.ctx_len);
672         dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
673         dest_addr_t = dest_addr;
674
675         /** Load PHash1 to LSB*/
676         pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
677         pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
678         pst.len = session->auth.ctx_len;
679         pst.dir = 1;
680         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
681         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
682         ccp_perform_passthru(&pst, cmd_q);
683
684         /**sha engine command descriptor for IntermediateHash*/
685
686         desc = &cmd_q->qbase_desc[cmd_q->qidx];
687         memset(desc, 0, Q_DESC_SIZE);
688
689         CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
690
691         CCP_CMD_SOC(desc) = 0;
692         CCP_CMD_IOC(desc) = 0;
693         CCP_CMD_INIT(desc) = 1;
694         CCP_CMD_EOM(desc) = 1;
695         CCP_CMD_PROT(desc) = 0;
696
697         function.raw = 0;
698         CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
699         CCP_CMD_FUNCTION(desc) = function.raw;
700
701         CCP_CMD_LEN(desc) = op->sym->auth.data.length;
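        /* the bit length must count the ipad block already absorbed
         * into the pre-computed partial hash
         */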
702         auth_msg_bits = (op->sym->auth.data.length +
703                          session->auth.block_size)  * 8;
704
705         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
706         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
707         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
708
709         CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
710         CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
711         CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
712
713         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
714
715         rte_wmb();
716
717         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
718         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
719         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
720                       cmd_q->qcontrol | CMD_Q_RUN);
721
722         /* Intermediate Hash value retrieve */
723         if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
724             (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
725
726                 pst.src_addr =
727                         (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
728                 pst.dest_addr = dest_addr_t;
729                 pst.len = CCP_SB_BYTES;
730                 pst.dir = 0;
731                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
732                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
733                 ccp_perform_passthru(&pst, cmd_q);
734
735                 pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
736                 pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
737                 pst.len = CCP_SB_BYTES;
738                 pst.dir = 0;
739                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
740                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
741                 ccp_perform_passthru(&pst, cmd_q);
742
743         } else {
744                 pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
745                 pst.dest_addr = dest_addr_t;
746                 pst.len = session->auth.ctx_len;
747                 pst.dir = 0;
748                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
749                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
750                 ccp_perform_passthru(&pst, cmd_q);
751
752         }
753
754         /** Load PHash2 to LSB*/
755         addr += session->auth.ctx_len;
756         pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
757         pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
758         pst.len = session->auth.ctx_len;
759         pst.dir = 1;
760         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
761         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
762         ccp_perform_passthru(&pst, cmd_q);
763
764         /**sha engine command descriptor for FinalHash*/
765         dest_addr_t += session->auth.offset;
766
767         desc = &cmd_q->qbase_desc[cmd_q->qidx];
768         memset(desc, 0, Q_DESC_SIZE);
769
770         CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
771
772         CCP_CMD_SOC(desc) = 0;
773         CCP_CMD_IOC(desc) = 0;
774         CCP_CMD_INIT(desc) = 1;
775         CCP_CMD_EOM(desc) = 1;
776         CCP_CMD_PROT(desc) = 0;
777
778         function.raw = 0;
779         CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
780         CCP_CMD_FUNCTION(desc) = function.raw;
781
782         CCP_CMD_LEN(desc) = (session->auth.ctx_len -
783                              session->auth.offset);
784         auth_msg_bits = (session->auth.block_size +
785                          session->auth.ctx_len -
786                          session->auth.offset) * 8;
787
788         CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
789         CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
790         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
791
792         CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
793         CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
794         CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
795
796         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
797
798         rte_wmb();
799
800         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
801         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
802         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
803                       cmd_q->qcontrol | CMD_Q_RUN);
804
805         /* Retrieve hmac output */
806         pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
807         pst.dest_addr = dest_addr;
808         pst.len = session->auth.ctx_len;
809         pst.dir = 0;
810         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
811         if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
812             (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
813                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
814         else
815                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
816         ccp_perform_passthru(&pst, cmd_q);
817
818         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
819         return 0;
820
821 }
822
823 static int
824 ccp_perform_sha(struct rte_crypto_op *op,
825                 struct ccp_queue *cmd_q)
826 {
827         struct ccp_session *session;
828         union ccp_function function;
829         struct ccp_desc *desc;
830         uint32_t tail;
831         phys_addr_t src_addr, dest_addr;
832         struct ccp_passthru pst;
833         void *append_ptr;
834         uint64_t auth_msg_bits;
835
836         session = (struct ccp_session *)get_session_private_data(
837                                          op->sym->session,
838                                         ccp_cryptodev_driver_id);
839
840         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
841                                               op->sym->auth.data.offset);
842
843         append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
844                                                 session->auth.ctx_len);
845         dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
846
847         /** Passthru sha context*/
848
849         pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
850                                                      session->auth.ctx);
851         pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
852         pst.len = session->auth.ctx_len;
853         pst.dir = 1;
854         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
855         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
856         ccp_perform_passthru(&pst, cmd_q);
857
858         /**prepare sha command descriptor*/
859
860         desc = &cmd_q->qbase_desc[cmd_q->qidx];
861         memset(desc, 0, Q_DESC_SIZE);
862
863         CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
864
865         CCP_CMD_SOC(desc) = 0;
866         CCP_CMD_IOC(desc) = 0;
867         CCP_CMD_INIT(desc) = 1;
868         CCP_CMD_EOM(desc) = 1;
869         CCP_CMD_PROT(desc) = 0;
870
871         function.raw = 0;
872         CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
873         CCP_CMD_FUNCTION(desc) = function.raw;
874
875         CCP_CMD_LEN(desc) = op->sym->auth.data.length;
876         auth_msg_bits = op->sym->auth.data.length * 8;
877
878         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
879         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
880         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
881
882         CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
883         CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
884         CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
885
886         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
887
888         rte_wmb();
889
890         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
891         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
892         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
893                       cmd_q->qcontrol | CMD_Q_RUN);
894
895         /* Hash value retrieve */
896         pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
897         pst.dest_addr = dest_addr;
898         pst.len = session->auth.ctx_len;
899         pst.dir = 0;
900         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
901         if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
902             (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
903                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
904         else
905                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
906         ccp_perform_passthru(&pst, cmd_q);
907
908         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
909         return 0;
910
911 }
912
913 static int
914 ccp_perform_aes_cmac(struct rte_crypto_op *op,
915                      struct ccp_queue *cmd_q)
916 {
917         struct ccp_session *session;
918         union ccp_function function;
919         struct ccp_passthru pst;
920         struct ccp_desc *desc;
921         uint32_t tail;
922         uint8_t *src_tb, *append_ptr, *ctx_addr;
923         phys_addr_t src_addr, dest_addr, key_addr;
924         int length, non_align_len;
925
926         session = (struct ccp_session *)get_session_private_data(
927                                          op->sym->session,
928                                         ccp_cryptodev_driver_id);
929         key_addr = rte_mem_virt2phy(session->auth.key_ccp);
930
931         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
932                                               op->sym->auth.data.offset);
933         append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
934                                                 session->auth.ctx_len);
935         dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
936
937         function.raw = 0;
938         CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
939         CCP_AES_MODE(&function) = session->auth.um.aes_mode;
940         CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
941
942         if (op->sym->auth.data.length % session->auth.block_size == 0) {
943
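                /*
                 * Block-aligned data: load K1 (upper half of the first
                 * pre-compute slot) together with a zero IV.
                 */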
944                 ctx_addr = session->auth.pre_compute;
945                 memset(ctx_addr, 0, AES_BLOCK_SIZE);
946                 pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
947                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
948                 pst.len = CCP_SB_BYTES;
949                 pst.dir = 1;
950                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
951                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
952                 ccp_perform_passthru(&pst, cmd_q);
953
954                 desc = &cmd_q->qbase_desc[cmd_q->qidx];
955                 memset(desc, 0, Q_DESC_SIZE);
956
957                 /* prepare desc for aes-cmac command */
958                 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
959                 CCP_CMD_EOM(desc) = 1;
960                 CCP_CMD_FUNCTION(desc) = function.raw;
961
962                 CCP_CMD_LEN(desc) = op->sym->auth.data.length;
963                 CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
964                 CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
965                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
966
967                 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
968                 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
969                 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
970                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
971
972                 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
973
974                 rte_wmb();
975
976                 tail =
977                 (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
978                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
979                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
980                               cmd_q->qcontrol | CMD_Q_RUN);
981         } else {
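                /*
                 * Trailing partial block: load K2 (second pre-compute slot)
                 * with a zero IV; the padded last block is handled by a
                 * separate descriptor below.
                 */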
982                 ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
983                 memset(ctx_addr, 0, AES_BLOCK_SIZE);
984                 pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
985                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
986                 pst.len = CCP_SB_BYTES;
987                 pst.dir = 1;
988                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
989                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
990                 ccp_perform_passthru(&pst, cmd_q);
991
992                 length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
993                 length *= AES_BLOCK_SIZE;
994                 non_align_len = op->sym->auth.data.length - length;
995                 /* prepare desc for aes-cmac command */
996                 /*Command 1*/
997                 desc = &cmd_q->qbase_desc[cmd_q->qidx];
998                 memset(desc, 0, Q_DESC_SIZE);
999
1000                 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1001                 CCP_CMD_INIT(desc) = 1;
1002                 CCP_CMD_FUNCTION(desc) = function.raw;
1003
1004                 CCP_CMD_LEN(desc) = length;
1005                 CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
1006                 CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1007                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1008
1009                 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1010                 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1011                 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1012                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1013
1014                 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1015
1016                 /*Command 2*/
1017                 append_ptr = append_ptr + CCP_SB_BYTES;
1018                 memset(append_ptr, 0, AES_BLOCK_SIZE);
1019                 src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
1020                                                  uint8_t *,
1021                                                  op->sym->auth.data.offset +
1022                                                  length);
1023                 rte_memcpy(append_ptr, src_tb, non_align_len);
1024                 append_ptr[non_align_len] = CMAC_PAD_VALUE;
1025
1026                 desc = &cmd_q->qbase_desc[cmd_q->qidx];
1027                 memset(desc, 0, Q_DESC_SIZE);
1028
1029                 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1030                 CCP_CMD_EOM(desc) = 1;
1031                 CCP_CMD_FUNCTION(desc) = function.raw;
1032                 CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
1033
1034                 CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
1035                 CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
1036                 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1037
1038                 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1039                 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1040                 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1041                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1042
1043                 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1044
1045                 rte_wmb();
1046                 tail =
1047                 (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1048                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1049                 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1050                               cmd_q->qcontrol | CMD_Q_RUN);
1051         }
1052         /* Retrieve result */
1053         pst.dest_addr = dest_addr;
1054         pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
1055         pst.len = CCP_SB_BYTES;
1056         pst.dir = 0;
1057         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1058         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1059         ccp_perform_passthru(&pst, cmd_q);
1060
1061         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1062         return 0;
1063 }
1064
1065 static int
1066 ccp_perform_aes(struct rte_crypto_op *op,
1067                 struct ccp_queue *cmd_q,
1068                 struct ccp_batch_info *b_info)
1069 {
1070         struct ccp_session *session;
1071         union ccp_function function;
1072         uint8_t *lsb_buf;
1073         struct ccp_passthru pst = {0};
1074         struct ccp_desc *desc;
1075         phys_addr_t src_addr, dest_addr, key_addr;
1076         uint8_t *iv;
1077
1078         session = (struct ccp_session *)get_session_private_data(
1079                                          op->sym->session,
1080                                         ccp_cryptodev_driver_id);
1081         function.raw = 0;
1082
1083         iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
1084         if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
1085                 if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
1086                         rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
1087                                    iv, session->iv.length);
1088                         pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
1089                         CCP_AES_SIZE(&function) = 0x1F;
1090                 } else {
1091                         lsb_buf =
1092                         &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
1093                         rte_memcpy(lsb_buf +
1094                                    (CCP_SB_BYTES - session->iv.length),
1095                                    iv, session->iv.length);
1096                         pst.src_addr = b_info->lsb_buf_phys +
1097                                 (b_info->lsb_buf_idx * CCP_SB_BYTES);
1098                         b_info->lsb_buf_idx++;
1099                 }
1100
1101                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
1102                 pst.len = CCP_SB_BYTES;
1103                 pst.dir = 1;
1104                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1105                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1106                 ccp_perform_passthru(&pst, cmd_q);
1107         }
1108
1109         desc = &cmd_q->qbase_desc[cmd_q->qidx];
1110
1111         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
1112                                               op->sym->cipher.data.offset);
1113         if (likely(op->sym->m_dst != NULL))
1114                 dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
1115                                                 op->sym->cipher.data.offset);
1116         else
1117                 dest_addr = src_addr;
1118         key_addr = session->cipher.key_phys;
1119
1120         /* prepare desc for aes command */
1121         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1122         CCP_CMD_INIT(desc) = 1;
1123         CCP_CMD_EOM(desc) = 1;
1124
1125         CCP_AES_ENCRYPT(&function) = session->cipher.dir;
1126         CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
1127         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
1128         CCP_CMD_FUNCTION(desc) = function.raw;
1129
1130         CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
1131
1132         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
1133         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1134         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1135
1136         CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
1137         CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
1138         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1139
1140         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1141         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1142         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1143
1144         if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
1145                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1146
1147         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1148         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1149         return 0;
1150 }
1151
1152 static int
1153 ccp_perform_3des(struct rte_crypto_op *op,
1154                 struct ccp_queue *cmd_q,
1155                 struct ccp_batch_info *b_info)
1156 {
1157         struct ccp_session *session;
1158         union ccp_function function;
1159         unsigned char *lsb_buf;
1160         struct ccp_passthru pst;
1161         struct ccp_desc *desc;
1162         uint32_t tail;
1163         uint8_t *iv;
1164         phys_addr_t src_addr, dest_addr, key_addr;
1165
1166         session = (struct ccp_session *)get_session_private_data(
1167                                          op->sym->session,
1168                                         ccp_cryptodev_driver_id);
1169
1170         iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
1171         switch (session->cipher.um.des_mode) {
1172         case CCP_DES_MODE_CBC:
1173                 lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
1174                 b_info->lsb_buf_idx++;
1175
1176                 rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
1177                            iv, session->iv.length);
1178
1179                 pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
1180                 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
1181                 pst.len = CCP_SB_BYTES;
1182                 pst.dir = 1;
1183                 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1184                 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
1185                 ccp_perform_passthru(&pst, cmd_q);
1186                 break;
1187         case CCP_DES_MODE_CFB:
1188         case CCP_DES_MODE_ECB:
1189                 CCP_LOG_ERR("Unsupported DES cipher mode");
1190                 return -ENOTSUP;
1191         }
1192
1193         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
1194                                               op->sym->cipher.data.offset);
1195         if (unlikely(op->sym->m_dst != NULL))
1196                 dest_addr =
1197                         rte_pktmbuf_mtophys_offset(op->sym->m_dst,
1198                                                    op->sym->cipher.data.offset);
1199         else
1200                 dest_addr = src_addr;
1201
1202         key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
1203
1204         desc = &cmd_q->qbase_desc[cmd_q->qidx];
1205
1206         memset(desc, 0, Q_DESC_SIZE);
1207
1208         /* prepare desc for des command */
1209         CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
1210
1211         CCP_CMD_SOC(desc) = 0;
1212         CCP_CMD_IOC(desc) = 0;
1213         CCP_CMD_INIT(desc) = 1;
1214         CCP_CMD_EOM(desc) = 1;
1215         CCP_CMD_PROT(desc) = 0;
1216
1217         function.raw = 0;
1218         CCP_DES_ENCRYPT(&function) = session->cipher.dir;
1219         CCP_DES_MODE(&function) = session->cipher.um.des_mode;
1220         CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
1221         CCP_CMD_FUNCTION(desc) = function.raw;
1222
1223         CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
1224
1225         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
1226         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1227         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1228
1229         CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
1230         CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
1231         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1232
1233         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1234         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1235         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1236
1237         if (session->cipher.um.des_mode)
1238                 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1239
1240         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1241
1242         rte_wmb();
1243
1244         /* Write the new tail address back to the queue register */
1245         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1246         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1247         /* Turn the queue back on using our cached control register */
1248         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1249                       cmd_q->qcontrol | CMD_Q_RUN);
1250
1251         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1252         return 0;
1253 }
1254
1255 static int
1256 ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
1257 {
1258         struct ccp_session *session;
1259         union ccp_function function;
1260         uint8_t *iv;
1261         struct ccp_passthru pst;
1262         struct ccp_desc *desc;
1263         uint32_t tail;
1264         uint64_t *temp;
1265         phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
1266         phys_addr_t digest_dest_addr;
1267         int length, non_align_len;
1268
1269         session = (struct ccp_session *)get_session_private_data(
1270                                          op->sym->session,
1271                                          ccp_cryptodev_driver_id);
1272         iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
1273         key_addr = session->cipher.key_phys;
1274
1275         src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
1276                                               op->sym->aead.data.offset);
1277         if (unlikely(op->sym->m_dst != NULL))
1278                 dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
1279                                                 op->sym->aead.data.offset);
1280         else
1281                 dest_addr = src_addr;
1282         rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
1283         digest_dest_addr = op->sym->aead.digest.phys_addr;
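        /* the 16 bytes after the digest hold the final GHASH block:
         * 64-bit AAD length and 64-bit text length, in bits, big-endian
         */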
1284         temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
1285         *temp++ = rte_bswap64(session->auth.aad_length << 3);
1286         *temp = rte_bswap64(op->sym->aead.data.length << 3);
1287
1288         non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
1289         length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
1290
1291         aad_addr = op->sym->aead.aad.phys_addr;
1292
1293         /* CMD1 IV Passthru */
1294         rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
1295                    session->iv.length);
1296         pst.src_addr = session->cipher.nonce_phys;
1297         pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
1298         pst.len = CCP_SB_BYTES;
1299         pst.dir = 1;
1300         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1301         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1302         ccp_perform_passthru(&pst, cmd_q);
1303
1304         /* CMD2 GHASH-AAD */
1305         function.raw = 0;
1306         CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
1307         CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
1308         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
1309
1310         desc = &cmd_q->qbase_desc[cmd_q->qidx];
1311         memset(desc, 0, Q_DESC_SIZE);
1312
1313         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1314         CCP_CMD_INIT(desc) = 1;
1315         CCP_CMD_FUNCTION(desc) = function.raw;
1316
1317         CCP_CMD_LEN(desc) = session->auth.aad_length;
1318
1319         CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
1320         CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
1321         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1322
1323         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1324         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1325         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1326
1327         CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1328
1329         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1330         rte_wmb();
1331
1332         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1333         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1334         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1335                       cmd_q->qcontrol | CMD_Q_RUN);
1336
1337         /* CMD3 : GCTR Plain text */
1338         function.raw = 0;
1339         CCP_AES_ENCRYPT(&function) = session->cipher.dir;
1340         CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
1341         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
1342         if (non_align_len == 0)
1343                 CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
1344         else
1345                 CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
1346
1347
1348         desc = &cmd_q->qbase_desc[cmd_q->qidx];
1349         memset(desc, 0, Q_DESC_SIZE);
1350
1351         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1352         CCP_CMD_EOM(desc) = 1;
1353         CCP_CMD_FUNCTION(desc) = function.raw;
1354
1355         CCP_CMD_LEN(desc) = length;
1356
1357         CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
1358         CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
1359         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1360
1361         CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
1362         CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
1363         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1364
1365         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1366         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1367         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1368
1369         CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1370
1371         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1372         rte_wmb();
1373
1374         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1375         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1376         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1377                       cmd_q->qcontrol | CMD_Q_RUN);
1378
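        /*
         * CMD4 reloads the initial counter block into the sb_iv LSB entry;
         * GCM requires the tag to be encrypted with the original counter
         * block (J0) rather than the value left behind by the GCTR pass.
         */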
1379         /* CMD4 : PT to copy IV */
1380         pst.src_addr = session->cipher.nonce_phys;
1381         pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
1382         pst.len = AES_BLOCK_SIZE;
1383         pst.dir = 1;
1384         pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
1385         pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
1386         ccp_perform_passthru(&pst, cmd_q);
1387
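        /*
         * CMD5 feeds the 16-byte length block (AAD length || payload
         * length), staged just past the digest area, through GHASH-Final
         * and writes the resulting tag to digest_dest_addr.
         */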
1388         /* CMD5 : GHASH-Final */
1389         function.raw = 0;
1390         CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
1391         CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
1392         CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
1393
1394         desc = &cmd_q->qbase_desc[cmd_q->qidx];
1395         memset(desc, 0, Q_DESC_SIZE);
1396
1397         CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
1398         CCP_CMD_FUNCTION(desc) = function.raw;
1399         /* Last block (AAD_len || PT_len) */
1400         CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
1401
1402         CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
1403         CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
1404         CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1405
1406         CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
1407         CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
1408         CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1409
1410         CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
1411         CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
1412         CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
1413
1414         CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
1415
1416         cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
1417         rte_wmb();
1418
1419         tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
1420         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
1421         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1422                       cmd_q->qcontrol | CMD_Q_RUN);
1423
1424         op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1425         return 0;
1426 }
1427
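/*
 * Build the descriptors for a cipher-only operation.  Each ccp_perform_*
 * routine appends its descriptors to cmd_q; desccnt records how many were
 * consumed so that process_ops_to_dequeue() can return the slots once the
 * hardware has finished with them.
 */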
1428 static inline int
1429 ccp_crypto_cipher(struct rte_crypto_op *op,
1430                   struct ccp_queue *cmd_q,
1431                   struct ccp_batch_info *b_info)
1432 {
1433         int result = 0;
1434         struct ccp_session *session;
1435
1436         session = (struct ccp_session *)get_session_private_data(
1437                                          op->sym->session,
1438                                          ccp_cryptodev_driver_id);
1439
1440         switch (session->cipher.algo) {
1441         case CCP_CIPHER_ALGO_AES_CBC:
1442                 result = ccp_perform_aes(op, cmd_q, b_info);
1443                 b_info->desccnt += 2;
1444                 break;
1445         case CCP_CIPHER_ALGO_AES_CTR:
1446                 result = ccp_perform_aes(op, cmd_q, b_info);
1447                 b_info->desccnt += 2;
1448                 break;
1449         case CCP_CIPHER_ALGO_AES_ECB:
1450                 result = ccp_perform_aes(op, cmd_q, b_info);
1451                 b_info->desccnt += 1;
1452                 break;
1453         case CCP_CIPHER_ALGO_3DES_CBC:
1454                 result = ccp_perform_3des(op, cmd_q, b_info);
1455                 b_info->desccnt += 2;
1456                 break;
1457         default:
1458                 CCP_LOG_ERR("Unsupported cipher algo %d",
1459                             session->cipher.algo);
1460                 return -ENOTSUP;
1461         }
1462         return result;
1463 }
1464
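/*
 * Build the descriptors for an auth-only operation (plain SHA, HMAC or
 * AES-CMAC).  As with the cipher path, desccnt mirrors the number of
 * descriptors each routine enqueues.
 */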
1465 static inline int
1466 ccp_crypto_auth(struct rte_crypto_op *op,
1467                 struct ccp_queue *cmd_q,
1468                 struct ccp_batch_info *b_info)
1469 {
1470
1471         int result = 0;
1472         struct ccp_session *session;
1473
1474         session = (struct ccp_session *)get_session_private_data(
1475                                          op->sym->session,
1476                                         ccp_cryptodev_driver_id);
1477
1478         switch (session->auth.algo) {
1479         case CCP_AUTH_ALGO_SHA1:
1480                 result = ccp_perform_sha(op, cmd_q);
1481                 b_info->desccnt += 3;
1482                 break;
1483         case CCP_AUTH_ALGO_SHA1_HMAC:
1484                 result = ccp_perform_hmac(op, cmd_q);
1485                 b_info->desccnt += 6;
1486                 break;
1487         case CCP_AUTH_ALGO_AES_CMAC:
1488                 result = ccp_perform_aes_cmac(op, cmd_q);
1489                 b_info->desccnt += 4;
1490                 break;
1491         default:
1492                 CCP_LOG_ERR("Unsupported auth algo %d",
1493                             session->auth.algo);
1494                 return -ENOTSUP;
1495         }
1496
1497         return result;
1498 }
1499
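/*
 * Build the descriptors for a combined (AEAD) operation.  AES-GCM is the
 * only supported algorithm; the session must have both its auth and cipher
 * halves configured for GCM, and the operation consumes the five
 * descriptors built by ccp_perform_aes_gcm().
 */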
1500 static inline int
1501 ccp_crypto_aead(struct rte_crypto_op *op,
1502                 struct ccp_queue *cmd_q,
1503                 struct ccp_batch_info *b_info)
1504 {
1505         int result = 0;
1506         struct ccp_session *session;
1507
1508         session = (struct ccp_session *)get_session_private_data(
1509                                         op->sym->session,
1510                                         ccp_cryptodev_driver_id);
1511
1512         switch (session->auth.algo) {
1513         case CCP_AUTH_ALGO_AES_GCM:
1514                 if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
1515                         CCP_LOG_ERR("Incorrect chain order");
1516                         return -1;
1517                 }
1518                 result = ccp_perform_aes_gcm(op, cmd_q);
1519                 b_info->desccnt += 5;
1520                 break;
1521         default:
1522                 CCP_LOG_ERR("Unsupported aead algo %d",
1523                             session->aead_algo);
1524                 return -ENOTSUP;
1525         }
1526         return result;
1527 }
1528
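/*
 * Translate up to nb_ops crypto ops into descriptors on cmd_q.  A
 * ccp_batch_info is taken from the queue pair's mempool to remember which
 * ops belong to the batch and the descriptor window they occupy
 * (head_offset/tail_offset); the hardware tail register is written once at
 * the end and the batch is pushed on the processed_pkts ring so the
 * dequeue path can poll it for completion.  Returns the number of ops
 * actually accepted (0 if no batch_info could be allocated).
 */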
1529 int
1530 process_ops_to_enqueue(const struct ccp_qp *qp,
1531                        struct rte_crypto_op **op,
1532                        struct ccp_queue *cmd_q,
1533                        uint16_t nb_ops,
1534                        int slots_req)
1535 {
1536         int i, result = 0;
1537         struct ccp_batch_info *b_info;
1538         struct ccp_session *session;
1539
1540         if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
1541                 CCP_LOG_ERR("batch info allocation failed");
1542                 return 0;
1543         }
1544         /* populate batch info necessary for dequeue */
1545         b_info->op_idx = 0;
1546         b_info->lsb_buf_idx = 0;
1547         b_info->desccnt = 0;
1548         b_info->cmd_q = cmd_q;
1549         b_info->lsb_buf_phys =
1550                 (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
1551         rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
1552
1553         b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
1554                                          Q_DESC_SIZE);
1555         for (i = 0; i < nb_ops; i++) {
1556                 session = (struct ccp_session *)get_session_private_data(
1557                                                  op[i]->sym->session,
1558                                                  ccp_cryptodev_driver_id);
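                /*
                 * Chained sessions (CIPHER_HASH / HASH_CIPHER) enqueue both
                 * halves back to back in the required order; a failure in
                 * the first half skips the second.
                 */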
1559                 switch (session->cmd_id) {
1560                 case CCP_CMD_CIPHER:
1561                         result = ccp_crypto_cipher(op[i], cmd_q, b_info);
1562                         break;
1563                 case CCP_CMD_AUTH:
1564                         result = ccp_crypto_auth(op[i], cmd_q, b_info);
1565                         break;
1566                 case CCP_CMD_CIPHER_HASH:
1567                         result = ccp_crypto_cipher(op[i], cmd_q, b_info);
1568                         if (result)
1569                                 break;
1570                         result = ccp_crypto_auth(op[i], cmd_q, b_info);
1571                         break;
1572                 case CCP_CMD_HASH_CIPHER:
1573                         result = ccp_crypto_auth(op[i], cmd_q, b_info);
1574                         if (result)
1575                                 break;
1576                         result = ccp_crypto_cipher(op[i], cmd_q, b_info);
1577                         break;
1578                 case CCP_CMD_COMBINED:
1579                         result = ccp_crypto_aead(op[i], cmd_q, b_info);
1580                         break;
1581                 default:
1582                         CCP_LOG_ERR("Unsupported cmd_id");
1583                         result = -1;
1584                 }
1585                 if (unlikely(result < 0)) {
1586                         rte_atomic64_add(&b_info->cmd_q->free_slots,
1587                                          (slots_req - b_info->desccnt));
1588                         break;
1589                 }
1590                 b_info->op[i] = op[i];
1591         }
1592
1593         b_info->opcnt = i;
1594         b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
1595                                          Q_DESC_SIZE);
1596
1597         rte_wmb();
1598         /* Write the new tail address back to the queue register */
1599         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
1600                               b_info->tail_offset);
1601         /* Turn the queue back on using our cached control register */
1602         CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
1603                               cmd_q->qcontrol | CMD_Q_RUN);
1604
1605         rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
1606
1607         return i;
1608 }
1609
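/*
 * Post-process an authenticated op after completion.  The engine wrote its
 * digest into a scratch area (ctx_len bytes) appended to the last mbuf
 * segment; SHA types other than SHA1/224/256 need a byte swap on the host
 * first.  Verify ops compare the digest against the one supplied with the
 * op, generate ops copy it out, and the scratch area is then trimmed off
 * the mbuf.
 */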
1610 static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
1611 {
1612         struct ccp_session *session;
1613         uint8_t *digest_data, *addr;
1614         struct rte_mbuf *m_last;
1615         int offset, digest_offset;
1616         uint8_t digest_le[64];
1617
1618         session = (struct ccp_session *)get_session_private_data(
1619                                          op->sym->session,
1620                                         ccp_cryptodev_driver_id);
1621
1622         if (session->cmd_id == CCP_CMD_COMBINED) {
1623                 digest_data = op->sym->aead.digest.data;
1624                 digest_offset = op->sym->aead.data.offset +
1625                                         op->sym->aead.data.length;
1626         } else {
1627                 digest_data = op->sym->auth.digest.data;
1628                 digest_offset = op->sym->auth.data.offset +
1629                                         op->sym->auth.data.length;
1630         }
1631         m_last = rte_pktmbuf_lastseg(op->sym->m_src);
1632         addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
1633                            m_last->data_len - session->auth.ctx_len);
1634
1635         rte_mb();
1636         offset = session->auth.offset;
1637
1638         if (session->auth.engine == CCP_ENGINE_SHA)
1639                 if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
1640                     (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
1641                     (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
1642                         /* All other SHA types need the byte
1643                          * swap done on the host
1644                          */
1645                         unsigned int i;
1646
1647                         offset = session->auth.ctx_len -
1648                                 session->auth.offset - 1;
1649                         for (i = 0; i < session->auth.digest_length; i++)
1650                                 digest_le[i] = addr[offset - i];
1651                         offset = 0;
1652                         addr = digest_le;
1653                 }
1654
1655         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1656         if (session->auth.op == CCP_AUTH_OP_VERIFY) {
1657                 if (memcmp(addr + offset, digest_data,
1658                            session->auth.digest_length) != 0)
1659                         op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1660
1661         } else {
1662                 if (unlikely(digest_data == NULL))
1663                         digest_data = rte_pktmbuf_mtod_offset(
1664                                         op->sym->m_dst, uint8_t *,
1665                                         digest_offset);
1666                 rte_memcpy(digest_data, addr + offset,
1667                            session->auth.digest_length);
1668         }
1669         /* Trim area used for digest from mbuf. */
1670         rte_pktmbuf_trim(op->sym->m_src,
1671                          session->auth.ctx_len);
1672 }
1673
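/*
 * Hand back up to nb_ops completed ops from a batch: cipher-only ops are
 * simply marked successful, while anything with an auth or AEAD component
 * goes through ccp_auth_dq_prepare() for digest copy/verification.
 */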
1674 static int
1675 ccp_prepare_ops(struct rte_crypto_op **op_d,
1676                 struct ccp_batch_info *b_info,
1677                 uint16_t nb_ops)
1678 {
1679         int i, min_ops;
1680         struct ccp_session *session;
1681
1682         min_ops = RTE_MIN(nb_ops, b_info->opcnt);
1683
1684         for (i = 0; i < min_ops; i++) {
1685                 op_d[i] = b_info->op[b_info->op_idx++];
1686                 session = (struct ccp_session *)get_session_private_data(
1687                                                  op_d[i]->sym->session,
1688                                                 ccp_cryptodev_driver_id);
1689                 switch (session->cmd_id) {
1690                 case CCP_CMD_CIPHER:
1691                         op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1692                         break;
1693                 case CCP_CMD_AUTH:
1694                 case CCP_CMD_CIPHER_HASH:
1695                 case CCP_CMD_HASH_CIPHER:
1696                 case CCP_CMD_COMBINED:
1697                         ccp_auth_dq_prepare(op_d[i]);
1698                         break;
1699                 default:
1700                         CCP_LOG_ERR("Unsupported cmd_id");
1701                 }
1702         }
1703
1704         b_info->opcnt -= min_ops;
1705         return min_ops;
1706 }
1707
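/*
 * Poll a posted batch for completion.  The hardware head pointer is read
 * and compared against the descriptor window [head_offset, tail_offset)
 * recorded at enqueue time, with both orderings handled so that queue
 * wrap-around is accounted for: while the head still lies inside the
 * window the batch is in flight and 0 is returned (b_info is cached in
 * qp->b_info so the next poll skips the ring dequeue).  For example, with
 * head_offset = 0x380 and tail_offset = 0x080 the batch is only complete
 * once the head has moved into [0x080, 0x380).
 */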
1708 int
1709 process_ops_to_dequeue(struct ccp_qp *qp,
1710                        struct rte_crypto_op **op,
1711                        uint16_t nb_ops)
1712 {
1713         struct ccp_batch_info *b_info;
1714         uint32_t cur_head_offset;
1715
1716         if (qp->b_info != NULL) {
1717                 b_info = qp->b_info;
1718                 if (unlikely(b_info->op_idx > 0))
1719                         goto success;
1720         } else if (rte_ring_dequeue(qp->processed_pkts,
1721                                     (void **)&b_info))
1722                 return 0;
1723         cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
1724                                        CMD_Q_HEAD_LO_BASE);
1725
1726         if (b_info->head_offset < b_info->tail_offset) {
1727                 if ((cur_head_offset >= b_info->head_offset) &&
1728                     (cur_head_offset < b_info->tail_offset)) {
1729                         qp->b_info = b_info;
1730                         return 0;
1731                 }
1732         } else {
1733                 if ((cur_head_offset >= b_info->head_offset) ||
1734                     (cur_head_offset < b_info->tail_offset)) {
1735                         qp->b_info = b_info;
1736                         return 0;
1737                 }
1738         }
1739
1740
1741 success:
1742         nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
1743         rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
1744         b_info->desccnt = 0;
1745         if (b_info->opcnt > 0) {
1746                 qp->b_info = b_info;
1747         } else {
1748                 rte_mempool_put(qp->batch_mp, (void *)b_info);
1749                 qp->b_info = NULL;
1750         }
1751
1752         return nb_ops;
1753 }
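
/*
 * Illustrative only (not part of the driver): a PMD burst path is expected
 * to drive the two helpers above roughly as sketched below.  Names such as
 * pick_queue() and required_slots() are placeholders, not real symbols
 * from this driver.
 *
 *	static uint16_t
 *	enqueue_burst(struct ccp_qp *qp, struct rte_crypto_op **ops,
 *		      uint16_t nb_ops)
 *	{
 *		struct ccp_queue *cmd_q = pick_queue(qp, nb_ops);
 *		int slots = required_slots(ops, nb_ops);
 *
 *		if (cmd_q == NULL)
 *			return 0;
 *		return process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots);
 *	}
 *
 *	static uint16_t
 *	dequeue_burst(struct ccp_qp *qp, struct rte_crypto_op **ops,
 *		      uint16_t nb_ops)
 *	{
 *		return process_ops_to_dequeue(qp, ops, nb_ops);
 *	}
 */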