crypto/aesni_mb: support Chacha20-Poly1305
[dpdk.git] / drivers / crypto / aesni_mb / rte_aesni_mb_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2017 Intel Corporation
3  */
4
5 #include <intel-ipsec-mb.h>
6
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_per_lcore.h>
15 #include <rte_ether.h>
16
17 #include "aesni_mb_pmd_private.h"
18
19 #define AES_CCM_DIGEST_MIN_LEN 4
20 #define AES_CCM_DIGEST_MAX_LEN 16
21 #define HMAC_MAX_BLOCK_SIZE 128
22 static uint8_t cryptodev_driver_id;
23
24 /*
25  * Needed to support the CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
26  * as we still use the JOB-based API even for synchronous processing.
27  */
28 static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
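
/*
 * Illustrative sketch only (not part of the driver): on the CPU-crypto path,
 * each lcore is expected to fetch its own manager from the per-lcore slot
 * defined above, roughly
 *
 *     MB_MGR *mgr = RTE_PER_LCORE(sync_mb_mgr);
 *     if (mgr == NULL)
 *             mgr = lazy_init_sync_mb_mgr(); /* hypothetical init helper */
 *
 * so that synchronous jobs never share an MB_MGR between cores.
 */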
29
30 typedef void (*hash_one_block_t)(const void *data, void *digest);
31 typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
32
33 /**
34  * Calculate the authentication pre-computes
35  *
36  * @param one_block_hash        Function pointer to calculate digest on ipad/opad
37  * @param ipad                  Inner pad output byte array
38  * @param opad                  Outer pad output byte array
39  * @param hkey                  Authentication key
40  * @param hkey_len              Authentication key length
41  * @param blocksize             Block size of selected hash algo
42  */
43 static void
44 calculate_auth_precomputes(hash_one_block_t one_block_hash,
45                 uint8_t *ipad, uint8_t *opad,
46                 const uint8_t *hkey, uint16_t hkey_len,
47                 uint16_t blocksize)
48 {
49         unsigned i, length;
50
51         uint8_t ipad_buf[blocksize] __rte_aligned(16);
52         uint8_t opad_buf[blocksize] __rte_aligned(16);
53
54         /* Setup inner and outer pads */
55         memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
56         memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
57
58         /* XOR hash key with inner and outer pads */
59         length = hkey_len > blocksize ? blocksize : hkey_len;
60
61         for (i = 0; i < length; i++) {
62                 ipad_buf[i] ^= hkey[i];
63                 opad_buf[i] ^= hkey[i];
64         }
65
66         /* Compute partial hashes */
67         (*one_block_hash)(ipad_buf, ipad);
68         (*one_block_hash)(opad_buf, opad);
69
70         /* Clean up stack */
71         memset(ipad_buf, 0, blocksize);
72         memset(opad_buf, 0, blocksize);
73 }
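
/*
 * Illustrative note: the two partial hashes above follow the standard HMAC
 * construction, HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). Hashing
 * K ^ ipad and K ^ opad once per session lets the multi-buffer library resume
 * both hash states for every packet instead of re-deriving them from the key.
 */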
74
75 /** Get xform chain order */
76 static enum aesni_mb_operation
77 aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
78 {
79         if (xform == NULL)
80                 return AESNI_MB_OP_NOT_SUPPORTED;
81
82         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
83                 if (xform->next == NULL)
84                         return AESNI_MB_OP_CIPHER_ONLY;
85                 if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
86                         return AESNI_MB_OP_CIPHER_HASH;
87         }
88
89         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
90                 if (xform->next == NULL)
91                         return AESNI_MB_OP_HASH_ONLY;
92                 if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
93                         return AESNI_MB_OP_HASH_CIPHER;
94         }
95 #if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
96         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
97                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
98                         /*
99                          * CCM requires hashing first and ciphering later
100                          * when encrypting
101                          */
102                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
103                                 return AESNI_MB_OP_AEAD_HASH_CIPHER;
104                         else
105                                 return AESNI_MB_OP_AEAD_CIPHER_HASH;
106                 } else {
107                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
108                                 return AESNI_MB_OP_AEAD_CIPHER_HASH;
109                         else
110                                 return AESNI_MB_OP_AEAD_HASH_CIPHER;
111                 }
112         }
113 #else
114         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
115                 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
116                                 xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
117                         if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
118                                 return AESNI_MB_OP_AEAD_CIPHER_HASH;
119                         else
120                                 return AESNI_MB_OP_AEAD_HASH_CIPHER;
121                 }
122         }
123 #endif
124
125         return AESNI_MB_OP_NOT_SUPPORTED;
126 }
127
128 static inline int
129 is_aead_algo(JOB_HASH_ALG hash_alg, JOB_CIPHER_MODE cipher_mode)
130 {
131 #if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
132         return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || hash_alg == AES_CCM ||
133                 (hash_alg == AES_GMAC && cipher_mode == GCM));
134 #else
135         return ((hash_alg == AES_GMAC && cipher_mode == GCM) ||
136                 hash_alg == AES_CCM);
137 #endif
138 }
139
140 /** Set session authentication parameters */
141 static int
142 aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
143                 struct aesni_mb_session *sess,
144                 const struct rte_crypto_sym_xform *xform)
145 {
146         hash_one_block_t hash_oneblock_fn = NULL;
147         unsigned int key_larger_block_size = 0;
148         uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
149         uint32_t auth_precompute = 1;
150
151         if (xform == NULL) {
152                 sess->auth.algo = NULL_HASH;
153                 return 0;
154         }
155
156         if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
157                 AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
158                 return -1;
159         }
160
161         /* Set IV parameters */
162         sess->auth_iv.offset = xform->auth.iv.offset;
163         sess->auth_iv.length = xform->auth.iv.length;
164
165         /* Set the request digest size */
166         sess->auth.req_digest_len = xform->auth.digest_length;
167
168         /* Select auth generate/verify */
169         sess->auth.operation = xform->auth.op;
170
171         /* Set Authentication Parameters */
172         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
173                 sess->auth.algo = AES_XCBC;
174
175                 uint16_t xcbc_mac_digest_len =
176                         get_truncated_digest_byte_length(AES_XCBC);
177                 if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
178                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
179                         return -EINVAL;
180                 }
181                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
182
183                 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
184                                 sess->auth.xcbc.k1_expanded,
185                                 sess->auth.xcbc.k2, sess->auth.xcbc.k3);
186                 return 0;
187         }
188
189         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
190                 uint32_t dust[4*15];
191
192                 sess->auth.algo = AES_CMAC;
193
194                 uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
195
196                 if (sess->auth.req_digest_len > cmac_digest_len) {
197                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
198                         return -EINVAL;
199                 }
200                 /*
201                  * Multi-buffer lib supports digest sizes from 4 to 16 bytes
202                  * in version 0.50, and sizes of 12 and 16 bytes
203                  * in version 0.49.
204                  * If the requested size is different, generate the full digest
205                  * (16 bytes) in a temporary location and then memcpy
206                  * the requested number of bytes.
207                  */
208                 if (sess->auth.req_digest_len < 4)
209                         sess->auth.gen_digest_len = cmac_digest_len;
210                 else
211                         sess->auth.gen_digest_len = sess->auth.req_digest_len;
212
213                 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
214                                 sess->auth.cmac.expkey, dust);
215                 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
216                                 sess->auth.cmac.skey1, sess->auth.cmac.skey2);
217                 return 0;
218         }
219
220         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
221                 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
222                         sess->cipher.direction = ENCRYPT;
223                         sess->chain_order = CIPHER_HASH;
224                 } else
225                         sess->cipher.direction = DECRYPT;
226
227                 sess->auth.algo = AES_GMAC;
228                 if (sess->auth.req_digest_len > get_digest_byte_length(AES_GMAC)) {
229                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
230                         return -EINVAL;
231                 }
232                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
233                 sess->iv.length = xform->auth.iv.length;
234                 sess->iv.offset = xform->auth.iv.offset;
235
236                 switch (xform->auth.key.length) {
237                 case AES_128_BYTES:
238                         IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
239                                 &sess->cipher.gcm_key);
240                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
241                         break;
242                 case AES_192_BYTES:
243                         IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
244                                 &sess->cipher.gcm_key);
245                         sess->cipher.key_length_in_bytes = AES_192_BYTES;
246                         break;
247                 case AES_256_BYTES:
248                         IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
249                                 &sess->cipher.gcm_key);
250                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
251                         break;
252                 default:
253                         AESNI_MB_LOG(ERR, "Invalid authentication key length\n");
254                         return -EINVAL;
255                 }
256
257                 return 0;
258         }
259
260 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
261         if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
262                 sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
263                 uint16_t zuc_eia3_digest_len =
264                         get_truncated_digest_byte_length(IMB_AUTH_ZUC_EIA3_BITLEN);
265                 if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
266                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
267                         return -EINVAL;
268                 }
269                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
270
271                 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
272                 return 0;
273         } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
274                 sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
275                 uint16_t snow3g_uia2_digest_len =
276                         get_truncated_digest_byte_length(IMB_AUTH_SNOW3G_UIA2_BITLEN);
277                 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
278                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
279                         return -EINVAL;
280                 }
281                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
282
283                 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
284                                         &sess->auth.pKeySched_snow3g_auth);
285                 return 0;
286         } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
287                 sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
288                 uint16_t kasumi_f9_digest_len =
289                         get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
290                 if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
291                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
292                         return -EINVAL;
293                 }
294                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
295
296                 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
297                                         &sess->auth.pKeySched_kasumi_auth);
298                 return 0;
299         }
300 #endif
301
302         switch (xform->auth.algo) {
303         case RTE_CRYPTO_AUTH_MD5_HMAC:
304                 sess->auth.algo = MD5;
305                 hash_oneblock_fn = mb_mgr->md5_one_block;
306                 break;
307         case RTE_CRYPTO_AUTH_SHA1_HMAC:
308                 sess->auth.algo = SHA1;
309                 hash_oneblock_fn = mb_mgr->sha1_one_block;
310                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
311                         IMB_SHA1(mb_mgr,
312                                 xform->auth.key.data,
313                                 xform->auth.key.length,
314                                 hashed_key);
315                         key_larger_block_size = 1;
316                 }
317                 break;
318         case RTE_CRYPTO_AUTH_SHA1:
319                 sess->auth.algo = PLAIN_SHA1;
320                 auth_precompute = 0;
321                 break;
322         case RTE_CRYPTO_AUTH_SHA224_HMAC:
323                 sess->auth.algo = SHA_224;
324                 hash_oneblock_fn = mb_mgr->sha224_one_block;
325                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
326                         IMB_SHA224(mb_mgr,
327                                 xform->auth.key.data,
328                                 xform->auth.key.length,
329                                 hashed_key);
330                         key_larger_block_size = 1;
331                 }
332                 break;
333         case RTE_CRYPTO_AUTH_SHA224:
334                 sess->auth.algo = PLAIN_SHA_224;
335                 auth_precompute = 0;
336                 break;
337         case RTE_CRYPTO_AUTH_SHA256_HMAC:
338                 sess->auth.algo = SHA_256;
339                 hash_oneblock_fn = mb_mgr->sha256_one_block;
340                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
341                         IMB_SHA256(mb_mgr,
342                                 xform->auth.key.data,
343                                 xform->auth.key.length,
344                                 hashed_key);
345                         key_larger_block_size = 1;
346                 }
347                 break;
348         case RTE_CRYPTO_AUTH_SHA256:
349                 sess->auth.algo = PLAIN_SHA_256;
350                 auth_precompute = 0;
351                 break;
352         case RTE_CRYPTO_AUTH_SHA384_HMAC:
353                 sess->auth.algo = SHA_384;
354                 hash_oneblock_fn = mb_mgr->sha384_one_block;
355                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
356                         IMB_SHA384(mb_mgr,
357                                 xform->auth.key.data,
358                                 xform->auth.key.length,
359                                 hashed_key);
360                         key_larger_block_size = 1;
361                 }
362                 break;
363         case RTE_CRYPTO_AUTH_SHA384:
364                 sess->auth.algo = PLAIN_SHA_384;
365                 auth_precompute = 0;
366                 break;
367         case RTE_CRYPTO_AUTH_SHA512_HMAC:
368                 sess->auth.algo = SHA_512;
369                 hash_oneblock_fn = mb_mgr->sha512_one_block;
370                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
371                         IMB_SHA512(mb_mgr,
372                                 xform->auth.key.data,
373                                 xform->auth.key.length,
374                                 hashed_key);
375                         key_larger_block_size = 1;
376                 }
377                 break;
378         case RTE_CRYPTO_AUTH_SHA512:
379                 sess->auth.algo = PLAIN_SHA_512;
380                 auth_precompute = 0;
381                 break;
382         default:
383                 AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
384                 return -ENOTSUP;
385         }
386         uint16_t trunc_digest_size =
387                         get_truncated_digest_byte_length(sess->auth.algo);
388         uint16_t full_digest_size =
389                         get_digest_byte_length(sess->auth.algo);
390
391         if (sess->auth.req_digest_len > full_digest_size ||
392                         sess->auth.req_digest_len == 0) {
393                 AESNI_MB_LOG(ERR, "Invalid digest size\n");
394                 return -EINVAL;
395         }
396
397         if (sess->auth.req_digest_len != trunc_digest_size &&
398                         sess->auth.req_digest_len != full_digest_size)
399                 sess->auth.gen_digest_len = full_digest_size;
400         else
401                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
402
403         /* Plain SHA does not require precompute key */
404         if (auth_precompute == 0)
405                 return 0;
406
407         /* Calculate Authentication precomputes */
408         if (key_larger_block_size) {
409                 calculate_auth_precomputes(hash_oneblock_fn,
410                         sess->auth.pads.inner, sess->auth.pads.outer,
411                         hashed_key,
412                         xform->auth.key.length,
413                         get_auth_algo_blocksize(sess->auth.algo));
414         } else {
415                 calculate_auth_precomputes(hash_oneblock_fn,
416                         sess->auth.pads.inner, sess->auth.pads.outer,
417                         xform->auth.key.data,
418                         xform->auth.key.length,
419                         get_auth_algo_blocksize(sess->auth.algo));
420         }
421
422         return 0;
423 }
424
425 /** Set session cipher parameters */
426 static int
427 aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
428                 struct aesni_mb_session *sess,
429                 const struct rte_crypto_sym_xform *xform)
430 {
431         uint8_t is_aes = 0;
432         uint8_t is_3DES = 0;
433         uint8_t is_docsis = 0;
434 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
435         uint8_t is_zuc = 0;
436         uint8_t is_snow3g = 0;
437         uint8_t is_kasumi = 0;
438 #endif
439
440         if (xform == NULL) {
441                 sess->cipher.mode = NULL_CIPHER;
442                 return 0;
443         }
444
445         if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
446                 AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
447                 return -EINVAL;
448         }
449
450         /* Select cipher direction */
451         switch (xform->cipher.op) {
452         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
453                 sess->cipher.direction = ENCRYPT;
454                 break;
455         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
456                 sess->cipher.direction = DECRYPT;
457                 break;
458         default:
459                 AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
460                 return -EINVAL;
461         }
462
463         /* Select cipher mode */
464         switch (xform->cipher.algo) {
465         case RTE_CRYPTO_CIPHER_AES_CBC:
466                 sess->cipher.mode = CBC;
467                 is_aes = 1;
468                 break;
469         case RTE_CRYPTO_CIPHER_AES_CTR:
470                 sess->cipher.mode = CNTR;
471                 is_aes = 1;
472                 break;
473         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
474                 sess->cipher.mode = DOCSIS_SEC_BPI;
475                 is_docsis = 1;
476                 break;
477         case RTE_CRYPTO_CIPHER_DES_CBC:
478                 sess->cipher.mode = DES;
479                 break;
480         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
481                 sess->cipher.mode = DOCSIS_DES;
482                 break;
483         case RTE_CRYPTO_CIPHER_3DES_CBC:
484                 sess->cipher.mode = DES3;
485                 is_3DES = 1;
486                 break;
487 #if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
488         case RTE_CRYPTO_CIPHER_AES_ECB:
489                 sess->cipher.mode = ECB;
490                 is_aes = 1;
491                 break;
492 #endif
493 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
494         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
495                 sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
496                 is_zuc = 1;
497                 break;
498         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
499                 sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
500                 is_snow3g = 1;
501                 break;
502         case RTE_CRYPTO_CIPHER_KASUMI_F8:
503                 sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
504                 is_kasumi = 1;
505                 break;
506 #endif
507         default:
508                 AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
509                 return -ENOTSUP;
510         }
511
512         /* Set IV parameters */
513         sess->iv.offset = xform->cipher.iv.offset;
514         sess->iv.length = xform->cipher.iv.length;
515
516         /* Check key length and choose key expansion function for AES */
517         if (is_aes) {
518                 switch (xform->cipher.key.length) {
519                 case AES_128_BYTES:
520                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
521                         IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
522                                         sess->cipher.expanded_aes_keys.encode,
523                                         sess->cipher.expanded_aes_keys.decode);
524                         break;
525                 case AES_192_BYTES:
526                         sess->cipher.key_length_in_bytes = AES_192_BYTES;
527                         IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
528                                         sess->cipher.expanded_aes_keys.encode,
529                                         sess->cipher.expanded_aes_keys.decode);
530                         break;
531                 case AES_256_BYTES:
532                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
533                         IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
534                                         sess->cipher.expanded_aes_keys.encode,
535                                         sess->cipher.expanded_aes_keys.decode);
536                         break;
537                 default:
538                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
539                         return -EINVAL;
540                 }
541         } else if (is_docsis) {
542                 switch (xform->cipher.key.length) {
543                 case AES_128_BYTES:
544                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
545                         IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
546                                         sess->cipher.expanded_aes_keys.encode,
547                                         sess->cipher.expanded_aes_keys.decode);
548                         break;
549 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
550                 case AES_256_BYTES:
551                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
552                         IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
553                                         sess->cipher.expanded_aes_keys.encode,
554                                         sess->cipher.expanded_aes_keys.decode);
555                         break;
556 #endif
557                 default:
558                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
559                         return -EINVAL;
560                 }
561         } else if (is_3DES) {
562                 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
563                                 sess->cipher.exp_3des_keys.key[1],
564                                 sess->cipher.exp_3des_keys.key[2]};
565
566                 switch (xform->cipher.key.length) {
567                 case  24:
568                         IMB_DES_KEYSCHED(mb_mgr, keys[0],
569                                         xform->cipher.key.data);
570                         IMB_DES_KEYSCHED(mb_mgr, keys[1],
571                                         xform->cipher.key.data + 8);
572                         IMB_DES_KEYSCHED(mb_mgr, keys[2],
573                                         xform->cipher.key.data + 16);
574
575                         /* Initialize keys - 24 bytes: [K1-K2-K3] */
576                         sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
577                         sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
578                         sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
579                         break;
580                 case 16:
581                         IMB_DES_KEYSCHED(mb_mgr, keys[0],
582                                         xform->cipher.key.data);
583                         IMB_DES_KEYSCHED(mb_mgr, keys[1],
584                                         xform->cipher.key.data + 8);
585                         /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
586                         sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
587                         sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
588                         sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
589                         break;
590                 case 8:
591                         IMB_DES_KEYSCHED(mb_mgr, keys[0],
592                                         xform->cipher.key.data);
593
594                         /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
595                         sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
596                         sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
597                         sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
598                         break;
599                 default:
600                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
601                         return -EINVAL;
602                 }
603
604                 sess->cipher.key_length_in_bytes = 24;
605 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
606         } else if (is_zuc) {
607                 if (xform->cipher.key.length != 16) {
608                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
609                         return -EINVAL;
610                 }
611                 sess->cipher.key_length_in_bytes = 16;
612                 memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
613                         16);
614         } else if (is_snow3g) {
615                 if (xform->cipher.key.length != 16) {
616                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
617                         return -EINVAL;
618                 }
619                 sess->cipher.key_length_in_bytes = 16;
620                 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
621                                         &sess->cipher.pKeySched_snow3g_cipher);
622         } else if (is_kasumi) {
623                 if (xform->cipher.key.length != 16) {
624                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
625                         return -EINVAL;
626                 }
627                 sess->cipher.key_length_in_bytes = 16;
628                 IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
629                                         &sess->cipher.pKeySched_kasumi_cipher);
630 #endif
631         } else {
632                 if (xform->cipher.key.length != 8) {
633                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
634                         return -EINVAL;
635                 }
636                 sess->cipher.key_length_in_bytes = 8;
637
638                 IMB_DES_KEYSCHED(mb_mgr,
639                         (uint64_t *)sess->cipher.expanded_aes_keys.encode,
640                                 xform->cipher.key.data);
641                 IMB_DES_KEYSCHED(mb_mgr,
642                         (uint64_t *)sess->cipher.expanded_aes_keys.decode,
643                                 xform->cipher.key.data);
644         }
645
646         return 0;
647 }
648
649 static int
650 aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
651                 struct aesni_mb_session *sess,
652                 const struct rte_crypto_sym_xform *xform)
653 {
654         switch (xform->aead.op) {
655         case RTE_CRYPTO_AEAD_OP_ENCRYPT:
656                 sess->cipher.direction = ENCRYPT;
657                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
658                 break;
659         case RTE_CRYPTO_AEAD_OP_DECRYPT:
660                 sess->cipher.direction = DECRYPT;
661                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
662                 break;
663         default:
664                 AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
665                 return -EINVAL;
666         }
667
668         /* Set IV parameters */
669         sess->iv.offset = xform->aead.iv.offset;
670         sess->iv.length = xform->aead.iv.length;
671
672         /* Set digest sizes */
673         sess->auth.req_digest_len = xform->aead.digest_length;
674         sess->auth.gen_digest_len = sess->auth.req_digest_len;
675
676         switch (xform->aead.algo) {
677         case RTE_CRYPTO_AEAD_AES_CCM:
678                 sess->cipher.mode = CCM;
679                 sess->auth.algo = AES_CCM;
680
681                 /* Check key length and choose key expansion function for AES */
682                 switch (xform->aead.key.length) {
683                 case AES_128_BYTES:
684                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
685                         IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
686                                         sess->cipher.expanded_aes_keys.encode,
687                                         sess->cipher.expanded_aes_keys.decode);
688                         break;
689                 default:
690                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
691                         return -EINVAL;
692                 }
693
694                 /* CCM digest size must be between 4 and 16 bytes and an even number */
695                 if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
696                                 sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
697                                 (sess->auth.req_digest_len & 1) == 1) {
698                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
699                         return -EINVAL;
700                 }
701                 break;
702
703         case RTE_CRYPTO_AEAD_AES_GCM:
704                 sess->cipher.mode = GCM;
705                 sess->auth.algo = AES_GMAC;
706
707                 switch (xform->aead.key.length) {
708                 case AES_128_BYTES:
709                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
710                         IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
711                                 &sess->cipher.gcm_key);
712                         break;
713                 case AES_192_BYTES:
714                         sess->cipher.key_length_in_bytes = AES_192_BYTES;
715                         IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
716                                 &sess->cipher.gcm_key);
717                         break;
718                 case AES_256_BYTES:
719                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
720                         IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
721                                 &sess->cipher.gcm_key);
722                         break;
723                 default:
724                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
725                         return -EINVAL;
726                 }
727
728                 /* GCM digest size must be between 1 and 16 */
729                 if (sess->auth.req_digest_len == 0 ||
730                                 sess->auth.req_digest_len > 16) {
731                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
732                         return -EINVAL;
733                 }
734                 break;
735
736 #if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
737         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
738                 sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
739                 sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;
740
741                 if (xform->aead.key.length != 32) {
742                         AESNI_MB_LOG(ERR, "Invalid key length");
743                         return -EINVAL;
744                 }
745                 sess->cipher.key_length_in_bytes = 32;
746                 memcpy(sess->cipher.expanded_aes_keys.encode,
747                         xform->aead.key.data, 32);
748                 if (sess->auth.req_digest_len != 16) {
749                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
750                         return -EINVAL;
751                 }
752                 break;
753 #endif
754         default:
755                 AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
756                 return -ENOTSUP;
757         }
758
759         return 0;
760 }
761
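/*
 * Illustrative application-side example (a sketch; key, IV_OFFSET and aad_len
 * are hypothetical placeholders): a Chacha20-Poly1305 session handled by this
 * PMD would be built from an AEAD xform such as
 *
 *     struct rte_crypto_sym_xform xform = {
 *             .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *             .aead = {
 *                     .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *                     .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
 *                     .key = { .data = key, .length = 32 },
 *                     .iv = { .offset = IV_OFFSET, .length = 12 },
 *                     .digest_length = 16,
 *                     .aad_length = aad_len,
 *             },
 *     };
 *
 * matching the 32-byte key and 16-byte digest enforced above in
 * aesni_mb_set_session_aead_parameters().
 */
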
762 /** Parse crypto xform chain and set private session parameters */
763 int
764 aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
765                 struct aesni_mb_session *sess,
766                 const struct rte_crypto_sym_xform *xform)
767 {
768         const struct rte_crypto_sym_xform *auth_xform = NULL;
769         const struct rte_crypto_sym_xform *cipher_xform = NULL;
770         const struct rte_crypto_sym_xform *aead_xform = NULL;
771         int ret;
772
773         /* Select Crypto operation - hash then cipher / cipher then hash */
774         switch (aesni_mb_get_chain_order(xform)) {
775         case AESNI_MB_OP_HASH_CIPHER:
776                 sess->chain_order = HASH_CIPHER;
777                 auth_xform = xform;
778                 cipher_xform = xform->next;
779                 break;
780         case AESNI_MB_OP_CIPHER_HASH:
781                 sess->chain_order = CIPHER_HASH;
782                 auth_xform = xform->next;
783                 cipher_xform = xform;
784                 break;
785         case AESNI_MB_OP_HASH_ONLY:
786                 sess->chain_order = HASH_CIPHER;
787                 auth_xform = xform;
788                 cipher_xform = NULL;
789                 break;
790         case AESNI_MB_OP_CIPHER_ONLY:
791                 /*
792                  * The multi-buffer library operates in only two modes,
793                  * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
794                  * the chain order depends on the cipher operation: encryption
795                  * is always the first operation and decryption the last one.
796                  */
797                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
798                         sess->chain_order = CIPHER_HASH;
799                 else
800                         sess->chain_order = HASH_CIPHER;
801                 auth_xform = NULL;
802                 cipher_xform = xform;
803                 break;
804         case AESNI_MB_OP_AEAD_CIPHER_HASH:
805                 sess->chain_order = CIPHER_HASH;
806                 sess->aead.aad_len = xform->aead.aad_length;
807                 aead_xform = xform;
808                 break;
809         case AESNI_MB_OP_AEAD_HASH_CIPHER:
810                 sess->chain_order = HASH_CIPHER;
811                 sess->aead.aad_len = xform->aead.aad_length;
812                 aead_xform = xform;
813                 break;
814         case AESNI_MB_OP_NOT_SUPPORTED:
815         default:
816                 AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
817                 return -ENOTSUP;
818         }
819
820         /* Default IV length = 0 */
821         sess->iv.length = 0;
822         sess->auth_iv.length = 0;
823
824         ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
825         if (ret != 0) {
826                 AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
827                 return ret;
828         }
829
830         ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
831                         cipher_xform);
832         if (ret != 0) {
833                 AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
834                 return ret;
835         }
836
837         if (aead_xform) {
838                 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
839                                 aead_xform);
840                 if (ret != 0) {
841                         AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
842                         return ret;
843                 }
844         }
845
846         return 0;
847 }
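
/*
 * Illustrative note (a sketch of the call flow, not shown in this file): this
 * entry point is expected to be reached through the PMD's sym_session_configure
 * operation, i.e. when an application calls rte_cryptodev_sym_session_init()
 * with an xform chain like the one sketched above, or lazily from get_session()
 * below for session-less operations.
 */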
848
849 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
850 /** Check DOCSIS security session configuration is valid */
851 static int
852 check_docsis_sec_session(struct rte_security_session_conf *conf)
853 {
854         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
855         struct rte_security_docsis_xform *docsis = &conf->docsis;
856
857         /* Downlink: CRC generate -> Cipher encrypt */
858         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
859
860                 if (crypto_sym != NULL &&
861                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
862                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
863                     crypto_sym->cipher.algo ==
864                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
865                     (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
866                      crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
867                     crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
868                     crypto_sym->next == NULL) {
869                         return 0;
870                 }
871         /* Uplink: Cipher decrypt -> CRC verify */
872         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
873
874                 if (crypto_sym != NULL &&
875                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
876                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
877                     crypto_sym->cipher.algo ==
878                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
879                     (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
880                      crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
881                     crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
882                     crypto_sym->next == NULL) {
883                         return 0;
884                 }
885         }
886
887         return -EINVAL;
888 }
889
890 /** Set DOCSIS security session auth (CRC) parameters */
891 static int
892 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
893                 struct rte_security_docsis_xform *xform)
894 {
895         if (xform == NULL) {
896                 AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");
897                 return -EINVAL;
898         }
899
900         /* Select CRC generate/verify */
901         if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
902                 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
903                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
904         } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
905                 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
906                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
907         } else {
908                 AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");
909                 return -ENOTSUP;
910         }
911
912         sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
913         sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
914
915         return 0;
916 }
917
918 /**
919  * Parse DOCSIS security session configuration and set private session
920  * parameters
921  */
922 int
923 aesni_mb_set_docsis_sec_session_parameters(
924                 struct rte_cryptodev *dev,
925                 struct rte_security_session_conf *conf,
926                 void *sess)
927 {
928         struct rte_security_docsis_xform *docsis_xform;
929         struct rte_crypto_sym_xform *cipher_xform;
930         struct aesni_mb_session *aesni_sess = sess;
931         struct aesni_mb_private *internals = dev->data->dev_private;
932         int ret;
933
934         ret = check_docsis_sec_session(conf);
935         if (ret) {
936                 AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
937                 return ret;
938         }
939
940         switch (conf->docsis.direction) {
941         case RTE_SECURITY_DOCSIS_UPLINK:
942                 aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
943                 docsis_xform = &conf->docsis;
944                 cipher_xform = conf->crypto_xform;
945                 break;
946         case RTE_SECURITY_DOCSIS_DOWNLINK:
947                 aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
948                 cipher_xform = conf->crypto_xform;
949                 docsis_xform = &conf->docsis;
950                 break;
951         default:
952                 return -EINVAL;
953         }
954
955         /* Default IV length = 0 */
956         aesni_sess->iv.length = 0;
957
958         ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
959                         docsis_xform);
960         if (ret != 0) {
961                 AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
962                 return -EINVAL;
963         }
964
965         ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
966                         aesni_sess, cipher_xform);
967
968         if (ret != 0) {
969                 AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
970                 return -EINVAL;
971         }
972
973         return 0;
974 }
975 #endif
976
977 /**
978  * Burst enqueue: place crypto operations on the ingress queue for processing.
979  *
980  * @param __qp         Queue Pair to process
981  * @param ops          Crypto operations for processing
982  * @param nb_ops       Number of crypto operations for processing
983  *
984  * @return
985  * - Number of crypto operations enqueued
986  */
987 static uint16_t
988 aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
989                 uint16_t nb_ops)
990 {
991         struct aesni_mb_qp *qp = __qp;
992
993         unsigned int nb_enqueued;
994
995         nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
996                         (void **)ops, nb_ops, NULL);
997
998         qp->stats.enqueued_count += nb_enqueued;
999
1000         return nb_enqueued;
1001 }
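
/*
 * Illustrative usage (application side; dev_id and qp_id are hypothetical):
 *
 *     uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * lands in the handler above, which only places the ops on the ingress ring;
 * the jobs are submitted to the multi-buffer manager later, on the dequeue path.
 */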
1002
1003 /** Get multi buffer session */
1004 static inline struct aesni_mb_session *
1005 get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
1006 {
1007         struct aesni_mb_session *sess = NULL;
1008
1009         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
1010                 if (likely(op->sym->session != NULL))
1011                         sess = (struct aesni_mb_session *)
1012                                         get_sym_session_private_data(
1013                                         op->sym->session,
1014                                         cryptodev_driver_id);
1015 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1016         } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1017                 if (likely(op->sym->sec_session != NULL))
1018                         sess = (struct aesni_mb_session *)
1019                                         get_sec_session_private_data(
1020                                                 op->sym->sec_session);
1021 #endif
1022         } else {
1023                 void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
1024                 void *_sess_private_data = NULL;
1025
1026                 if (_sess == NULL)
1027                         return NULL;
1028
1029                 if (rte_mempool_get(qp->sess_mp_priv,
1030                                 (void **)&_sess_private_data))
1031                         return NULL;
1032
1033                 sess = (struct aesni_mb_session *)_sess_private_data;
1034
1035                 if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
1036                                 sess, op->sym->xform) != 0)) {
1037                         rte_mempool_put(qp->sess_mp, _sess);
1038                         rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
1039                         sess = NULL;
1040                 }
1041                 op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
1042                 set_sym_session_private_data(op->sym->session,
1043                                 cryptodev_driver_id, _sess_private_data);
1044         }
1045
1046         if (unlikely(sess == NULL))
1047                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1048
1049         return sess;
1050 }
1051
1052 static inline uint64_t
1053 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
1054                 uint32_t oop)
1055 {
1056         struct rte_mbuf *m_src, *m_dst;
1057         uint8_t *p_src, *p_dst;
1058         uintptr_t u_src, u_dst;
1059         uint32_t cipher_end, auth_end;
1060
1061         /* Only cipher then hash needs special calculation. */
1062         if (!oop || session->chain_order != CIPHER_HASH)
1063                 return op->sym->auth.data.offset;
1064
1065         m_src = op->sym->m_src;
1066         m_dst = op->sym->m_dst;
1067
1068         p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
1069         p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
1070         u_src = (uintptr_t)p_src;
1071         u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
1072
1073         /**
1074          * Copy the content between cipher offset and auth offset for generating
1075          * correct digest.
1076          */
1077         if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
1078                 memcpy(p_dst + op->sym->auth.data.offset,
1079                                 p_src + op->sym->auth.data.offset,
1080                                 op->sym->cipher.data.offset -
1081                                 op->sym->auth.data.offset);
1082
1083         /**
1084          * Copy the content between (cipher offset + length) and (auth offset +
1085          * length) for generating correct digest
1086          */
1087         cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
1088         auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
1089         if (cipher_end < auth_end)
1090                 memcpy(p_dst + cipher_end, p_src + cipher_end,
1091                                 auth_end - cipher_end);
1092
1093         /**
1094          * Since intel-ipsec-mb only supports positive values,
1095          * we need to derive the correct offset between src and dst.
1096          */
1097
1098         return u_src < u_dst ? (u_dst - u_src) :
1099                         (UINT64_MAX - u_src + u_dst + 1);
1100 }
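
/*
 * Illustrative example (offsets hypothetical): for an out-of-place
 * cipher-then-hash op with auth.data.offset = 0 and cipher.data.offset = 16,
 * the 16 plaintext bytes in front of the ciphered region are copied from
 * m_src to m_dst above, and the returned value is the (always positive,
 * possibly wrapped) distance from the source buffer to
 * m_dst + auth.data.offset.
 */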
1101
1102 static inline void
1103 set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
1104                 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
1105                 void *iv, void *aad, void *digest, void *udata)
1106 {
1107         /* Set crypto operation */
1108         job->chain_order = session->chain_order;
1109
1110         /* Set cipher parameters */
1111         job->cipher_direction = session->cipher.direction;
1112         job->cipher_mode = session->cipher.mode;
1113
1114         job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
1115
1116         /* Set authentication parameters */
1117         job->hash_alg = session->auth.algo;
1118         job->iv = iv;
1119
1120         switch (job->hash_alg) {
1121         case AES_XCBC:
1122                 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1123                 job->u.XCBC._k2 = session->auth.xcbc.k2;
1124                 job->u.XCBC._k3 = session->auth.xcbc.k3;
1125
1126                 job->aes_enc_key_expanded =
1127                                 session->cipher.expanded_aes_keys.encode;
1128                 job->aes_dec_key_expanded =
1129                                 session->cipher.expanded_aes_keys.decode;
1130                 break;
1131
1132         case AES_CCM:
1133                 job->u.CCM.aad = (uint8_t *)aad + 18;
1134                 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1135                 job->aes_enc_key_expanded =
1136                                 session->cipher.expanded_aes_keys.encode;
1137                 job->aes_dec_key_expanded =
1138                                 session->cipher.expanded_aes_keys.decode;
1139                 job->iv++;
1140                 break;
1141
1142         case AES_CMAC:
1143                 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1144                 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1145                 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1146                 job->aes_enc_key_expanded =
1147                                 session->cipher.expanded_aes_keys.encode;
1148                 job->aes_dec_key_expanded =
1149                                 session->cipher.expanded_aes_keys.decode;
1150                 break;
1151
1152         case AES_GMAC:
1153                 if (session->cipher.mode == GCM) {
1154                         job->u.GCM.aad = aad;
1155                         job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
1156                 } else {
1157                         /* For GMAC */
1158                         job->u.GCM.aad = buf;
1159                         job->u.GCM.aad_len_in_bytes = len;
1160                         job->cipher_mode = GCM;
1161                 }
1162                 job->aes_enc_key_expanded = &session->cipher.gcm_key;
1163                 job->aes_dec_key_expanded = &session->cipher.gcm_key;
1164                 break;
1165
1166         default:
1167                 job->u.HMAC._hashed_auth_key_xor_ipad =
1168                                 session->auth.pads.inner;
1169                 job->u.HMAC._hashed_auth_key_xor_opad =
1170                                 session->auth.pads.outer;
1171
1172                 if (job->cipher_mode == DES3) {
1173                         job->aes_enc_key_expanded =
1174                                 session->cipher.exp_3des_keys.ks_ptr;
1175                         job->aes_dec_key_expanded =
1176                                 session->cipher.exp_3des_keys.ks_ptr;
1177                 } else {
1178                         job->aes_enc_key_expanded =
1179                                 session->cipher.expanded_aes_keys.encode;
1180                         job->aes_dec_key_expanded =
1181                                 session->cipher.expanded_aes_keys.decode;
1182                 }
1183         }
1184
1185         /*
1186          * The multi-buffer library currently only supports returning a
1187          * truncated digest length as specified in the relevant IPsec RFCs.
1188          */
1189
1190         /* Set digest location and length */
1191         job->auth_tag_output = digest;
1192         job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1193
1194         /* Set IV parameters */
1195         job->iv_len_in_bytes = session->iv.length;
1196
1197         /* Data Parameters */
1198         job->src = buf;
1199         job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
1200         job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
1201         job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
1202         if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
1203                 job->msg_len_to_hash_in_bytes = 0;
1204                 job->msg_len_to_cipher_in_bytes = 0;
1205         } else {
1206                 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
1207                         sofs.ofs.auth.tail;
1208                 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
1209                         sofs.ofs.cipher.tail;
1210         }
1211
1212         job->user_data = udata;
1213 }
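
/*
 * Illustrative note: set_cpu_mb_job_params() above serves the synchronous
 * rte_cryptodev_sym_cpu_crypto_process() path mentioned at the top of this
 * file; the buffer, IV, AAD and digest pointers come from the caller's
 * struct rte_crypto_sym_vec rather than from an rte_crypto_op.
 */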
1214
1215 /**
1216  * Process a crypto operation and complete a JOB_AES_HMAC job structure for
1217  * submission to the multi buffer library for processing.
1218  *
1219  * @param       job             JOB_AES_HMAC structure to fill
1220  * @param       qp              queue pair
1221  * @param       op              crypto operation to process
1222  * @param       digest_idx      index of the next temporary digest to use
1223  * @return
1224  * - 0 on success (job structure completed)
1225  * - -1 if completion of the job structure isn't possible
1226  */
1227 static inline int
1228 set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
1229                 struct rte_crypto_op *op, uint8_t *digest_idx)
1230 {
1231         struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
1232         struct aesni_mb_session *session;
1233         uint32_t m_offset, oop;
1234
1235         session = get_session(qp, op);
1236         if (session == NULL) {
1237                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1238                 return -1;
1239         }
1240
1241         /* Set crypto operation */
1242         job->chain_order = session->chain_order;
1243
1244         /* Set cipher parameters */
1245         job->cipher_direction = session->cipher.direction;
1246         job->cipher_mode = session->cipher.mode;
1247
1248         job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
1249
1250         /* Set authentication parameters */
1251         job->hash_alg = session->auth.algo;
1252
1253         const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);
1254
1255         switch (job->hash_alg) {
1256         case AES_XCBC:
1257                 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1258                 job->u.XCBC._k2 = session->auth.xcbc.k2;
1259                 job->u.XCBC._k3 = session->auth.xcbc.k3;
1260
1261                 job->aes_enc_key_expanded =
1262                                 session->cipher.expanded_aes_keys.encode;
1263                 job->aes_dec_key_expanded =
1264                                 session->cipher.expanded_aes_keys.decode;
1265                 break;
1266
1267         case AES_CCM:
1268                 job->u.CCM.aad = op->sym->aead.aad.data + 18;
1269                 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1270                 job->aes_enc_key_expanded =
1271                                 session->cipher.expanded_aes_keys.encode;
1272                 job->aes_dec_key_expanded =
1273                                 session->cipher.expanded_aes_keys.decode;
1274                 break;
1275
1276         case AES_CMAC:
1277                 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1278                 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1279                 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1280                 job->aes_enc_key_expanded =
1281                                 session->cipher.expanded_aes_keys.encode;
1282                 job->aes_dec_key_expanded =
1283                                 session->cipher.expanded_aes_keys.decode;
1284                 break;
1285
1286         case AES_GMAC:
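                     /*
                      * AES_GMAC covers both full AES-GCM (cipher.mode == GCM)
                      * and standalone GMAC authentication.
                      */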
1287                 if (session->cipher.mode == GCM) {
1288                         job->u.GCM.aad = op->sym->aead.aad.data;
1289                         job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
1290                 } else {
1291                         /* For GMAC */
1292                         job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
1293                                         uint8_t *, op->sym->auth.data.offset);
1294                         job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
1295                         job->cipher_mode = GCM;
1296                 }
1297                 job->aes_enc_key_expanded = &session->cipher.gcm_key;
1298                 job->aes_dec_key_expanded = &session->cipher.gcm_key;
1299                 break;
1300 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
1301         case IMB_AUTH_ZUC_EIA3_BITLEN:
1302                 job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
1303                 job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1304                                                 session->auth_iv.offset);
1305                 break;
1306         case IMB_AUTH_SNOW3G_UIA2_BITLEN:
1307                 job->u.SNOW3G_UIA2._key = (void *) &session->auth.pKeySched_snow3g_auth;
1308                 job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1309                                                 session->auth_iv.offset);
1310                 break;
1311         case IMB_AUTH_KASUMI_UIA1:
1312                 job->u.KASUMI_UIA1._key = (void *) &session->auth.pKeySched_kasumi_auth;
1313                 break;
1314 #endif
1315 #if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
1316         case IMB_AUTH_CHACHA20_POLY1305:
1317                 job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
1318                 job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
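                     /*
                      * Chacha20 uses the same key for encryption and
                      * decryption, so both key pointers reference the key
                      * stored in expanded_aes_keys.encode.
                      */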
1319                 job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
1320                 job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
1321                 break;
1322 #endif
1323         default:
1324                 job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
1325                 job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
1326
1327                 if (job->cipher_mode == DES3) {
1328                         job->aes_enc_key_expanded =
1329                                 session->cipher.exp_3des_keys.ks_ptr;
1330                         job->aes_dec_key_expanded =
1331                                 session->cipher.exp_3des_keys.ks_ptr;
1332                 } else {
1333                         job->aes_enc_key_expanded =
1334                                 session->cipher.expanded_aes_keys.encode;
1335                         job->aes_dec_key_expanded =
1336                                 session->cipher.expanded_aes_keys.decode;
1337                 }
1338         }
1339
1340         if (aead)
1341                 m_offset = op->sym->aead.data.offset;
1342         else
1343                 m_offset = op->sym->cipher.data.offset;
1344
1345 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
1346         if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
1347                 job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
1348                 job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
1349         } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
1350                 job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
1351                 m_offset = 0;
1352         } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
1353                 job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
1354                 m_offset = 0;
1355         }
1356 #endif
1357
1358         if (!op->sym->m_dst) {
1359                 /* in-place operation */
1360                 m_dst = m_src;
1361                 oop = 0;
1362         } else if (op->sym->m_dst == op->sym->m_src) {
1363                 /* in-place operation */
1364                 m_dst = m_src;
1365                 oop = 0;
1366         } else {
1367                 /* out-of-place operation */
1368                 m_dst = op->sym->m_dst;
1369                 oop = 1;
1370         }
1371
1372         /* Set digest output location */
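             /*
              * For verify operations, or when the generated digest is longer
              * than the requested one, write the digest to a scratch buffer in
              * the queue pair; it is checked or truncated in post-processing.
              */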
1373         if (job->hash_alg != NULL_HASH &&
1374                         session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1375                 job->auth_tag_output = qp->temp_digests[*digest_idx];
1376                 *digest_idx = (*digest_idx + 1) % MAX_JOBS;
1377         } else {
1378                 if (aead)
1379                         job->auth_tag_output = op->sym->aead.digest.data;
1380                 else
1381                         job->auth_tag_output = op->sym->auth.digest.data;
1382
1383                 if (session->auth.req_digest_len != session->auth.gen_digest_len) {
1384                         job->auth_tag_output = qp->temp_digests[*digest_idx];
1385                         *digest_idx = (*digest_idx + 1) % MAX_JOBS;
1386                 }
1387         }
1388         /*
1389          * The multi-buffer library currently only supports returning a
1390          * truncated digest length, as specified in the relevant IPsec RFCs.
1391          */
1392
1393         /* Set digest length */
1394         job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1395
1396         /* Set IV parameters */
1397         job->iv_len_in_bytes = session->iv.length;
1398
1399         /* Data Parameters */
1400         job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1401         job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
1402
1403         switch (job->hash_alg) {
1404         case AES_CCM:
1405                 job->cipher_start_src_offset_in_bytes =
1406                                 op->sym->aead.data.offset;
1407                 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
1408                 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
1409                 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
1410
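                     /*
                      * The first byte of the IV field is reserved for the CCM
                      * flags byte, so the nonce starts at offset 1.
                      */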
1411                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1412                         session->iv.offset + 1);
1413                 break;
1414
1415         case AES_GMAC:
1416                 if (session->cipher.mode == GCM) {
1417                         job->cipher_start_src_offset_in_bytes =
1418                                         op->sym->aead.data.offset;
1419                         job->hash_start_src_offset_in_bytes =
1420                                         op->sym->aead.data.offset;
1421                         job->msg_len_to_cipher_in_bytes =
1422                                         op->sym->aead.data.length;
1423                         job->msg_len_to_hash_in_bytes =
1424                                         op->sym->aead.data.length;
1425                 } else {
1426                         job->cipher_start_src_offset_in_bytes =
1427                                         op->sym->auth.data.offset;
1428                         job->hash_start_src_offset_in_bytes =
1429                                         op->sym->auth.data.offset;
1430                         job->msg_len_to_cipher_in_bytes = 0;
1431                         job->msg_len_to_hash_in_bytes = 0;
1432                 }
1433
1434                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1435                                 session->iv.offset);
1436                 break;
1437
1438 #if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
1439         case IMB_AUTH_CHACHA20_POLY1305:
1440                 job->cipher_start_src_offset_in_bytes = op->sym->aead.data.offset;
1441                 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
1442                 job->msg_len_to_cipher_in_bytes =
1443                                 op->sym->aead.data.length;
1444                 job->msg_len_to_hash_in_bytes =
1445                                 op->sym->aead.data.length;
1446
1447                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1448                                 session->iv.offset);
1449                 break;
1450 #endif
1451         default:
1452                 /* For SNOW3G, length and offsets are already in bits */
1453                 job->cipher_start_src_offset_in_bytes =
1454                                 op->sym->cipher.data.offset;
1455                 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
1456
1457                 job->hash_start_src_offset_in_bytes = auth_start_offset(op,
1458                                 session, oop);
1459                 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
1460
1461                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1462                         session->iv.offset);
1463         }
1464
1465 #if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
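             /*
              * ZUC cipher and KASUMI hash lengths are passed in bits by the
              * API; convert them to bytes here.
              */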
1466         if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
1467                 job->msg_len_to_cipher_in_bytes >>= 3;
1468         else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
1469                 job->msg_len_to_hash_in_bytes >>= 3;
1470 #endif
1471
1472         /* Set user data to be crypto operation data struct */
1473         job->user_data = op;
1474
1475         return 0;
1476 }
1477
1478 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1479 /**
1480  * Process a crypto operation containing a security op and complete a
1481  * JOB_AES_HMAC job structure for submission to the multi buffer library for
1482  * processing.
1483  */
1484 static inline int
1485 set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
1486                 struct rte_crypto_op *op, uint8_t *digest_idx)
1487 {
1488         struct rte_mbuf *m_src, *m_dst;
1489         struct rte_crypto_sym_op *sym;
1490         struct aesni_mb_session *session;
1491
1492         session = get_session(qp, op);
1493         if (unlikely(session == NULL)) {
1494                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1495                 return -1;
1496         }
1497
1498         /* Only DOCSIS protocol operations are supported for now */
1499         if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
1500                         session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
1501                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1502                 return -1;
1503         }
1504
1505         sym = op->sym;
1506         m_src = sym->m_src;
1507
1508         if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
1509                 /* in-place operation */
1510                 m_dst = m_src;
1511         } else {
1512                 /* out-of-place operation not supported */
1513                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1514                 return -ENOTSUP;
1515         }
1516
1517         /* Set crypto operation */
1518         job->chain_order = session->chain_order;
1519
1520         /* Set cipher parameters */
1521         job->cipher_direction = session->cipher.direction;
1522         job->cipher_mode = session->cipher.mode;
1523
1524         job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
1525         job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
1526         job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
1527
1528         /* Set IV parameters */
1529         job->iv_len_in_bytes = session->iv.length;
1530         job->iv = (uint8_t *)op + session->iv.offset;
1531
1532         /* Set authentication parameters */
1533         job->hash_alg = session->auth.algo;
1534
1535         /* Set digest output location */
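             /*
              * The computed CRC is written to a per-queue-pair scratch buffer
              * and, for verify operations, compared against the CRC at the end
              * of the frame in verify_docsis_sec_crc().
              */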
1536         job->auth_tag_output = qp->temp_digests[*digest_idx];
1537         *digest_idx = (*digest_idx + 1) % MAX_JOBS;
1538
1539         /* Set digest length */
1540         job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1541
1542         /* Set data parameters */
1543         job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1544         job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
1545                                                 sym->cipher.data.offset);
1546
1547         job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
1548         job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;
1549
1550         job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
1551         job->msg_len_to_hash_in_bytes = sym->auth.data.length;
1552
1553         job->user_data = op;
1554
1555         return 0;
1556 }
1557
1558 static inline void
1559 verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
1560 {
1561         uint16_t crc_offset;
1562         uint8_t *crc;
1563
1564         if (!job->msg_len_to_hash_in_bytes)
1565                 return;
1566
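             /*
              * job->dst points at the start of the ciphered region, so the CRC
              * sits immediately after the data covered by the CRC calculation.
              */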
1567         crc_offset = job->hash_start_src_offset_in_bytes +
1568                         job->msg_len_to_hash_in_bytes -
1569                         job->cipher_start_src_offset_in_bytes;
1570         crc = job->dst + crc_offset;
1571
1572         /* Verify CRC (at the end of the message) */
1573         if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
1574                 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1575 }
1576 #endif
1577
1578 static inline void
1579 verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
1580 {
1581         /* Verify digest if required */
1582         if (memcmp(job->auth_tag_output, digest, len) != 0)
1583                 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1584 }
1585
1586 static inline void
1587 generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
1588                 struct aesni_mb_session *sess)
1589 {
1590         /* No extra copy needed */
1591         if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
1592                 return;
1593
1594         /*
1595          * This can only happen for HMAC, so only the digest for
1596          * authentication algorithms needs to be copied.
1597          */
1598         memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1599                         sess->auth.req_digest_len);
1600 }
1601
1602 /**
1603  * Process a completed job and return the crypto operation it processed
1604  *
1605  * @param qp            Queue Pair to process
1606  * @param job           JOB_AES_HMAC job to process
1607  *
1608  * @return
1609  * - The processed crypto operation, with its status updated to reflect
1610  *   the result of the completed job
1611  */
1612 static inline struct rte_crypto_op *
1613 post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
1614 {
1615         struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
1616         struct aesni_mb_session *sess = NULL;
1617
1618 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1619         uint8_t is_docsis_sec = 0;
1620
1621         if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1622                 /*
1623                  * Assuming at this point that if it's a security type op, that
1624                  * this is for DOCSIS
1625                  */
1626                 is_docsis_sec = 1;
1627                 sess = get_sec_session_private_data(op->sym->sec_session);
1628         } else
1629 #endif
1630         {
1631                 sess = get_sym_session_private_data(op->sym->session,
1632                                                 cryptodev_driver_id);
1633         }
1634
1635         if (unlikely(sess == NULL)) {
1636                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1637                 return op;
1638         }
1639
1640         if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
1641                 switch (job->status) {
1642                 case STS_COMPLETED:
1643                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1644
1645                         if (job->hash_alg == NULL_HASH)
1646                                 break;
1647
1648                         if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1649                                 if (is_aead_algo(job->hash_alg, sess->cipher.mode))
1650                                         verify_digest(job,
1651                                                 op->sym->aead.digest.data,
1652                                                 sess->auth.req_digest_len,
1653                                                 &op->status);
1654 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1655                                 else if (is_docsis_sec)
1656                                         verify_docsis_sec_crc(job,
1657                                                 &op->status);
1658 #endif
1659                                 else
1660                                         verify_digest(job,
1661                                                 op->sym->auth.digest.data,
1662                                                 sess->auth.req_digest_len,
1663                                                 &op->status);
1664                         } else
1665                                 generate_digest(job, op, sess);
1666                         break;
1667                 default:
1668                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1669                 }
1670         }
1671
1672         /* Free session if a session-less crypto op */
1673         if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1674                 memset(sess, 0, sizeof(struct aesni_mb_session));
1675                 memset(op->sym->session, 0,
1676                         rte_cryptodev_sym_get_existing_header_session_size(
1677                                 op->sym->session));
1678                 rte_mempool_put(qp->sess_mp_priv, sess);
1679                 rte_mempool_put(qp->sess_mp, op->sym->session);
1680                 op->sym->session = NULL;
1681         }
1682
1683         return op;
1684 }
1685
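     /*
      * Record the job status in the 32-bit status word passed through
      * user_data: 0 on success, EBADMSG otherwise.
      */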
1686 static inline void
1687 post_process_mb_sync_job(JOB_AES_HMAC *job)
1688 {
1689         uint32_t *st;
1690
1691         st = job->user_data;
1692         st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
1693 }
1694
1695 /**
1696  * Process a completed JOB_AES_HMAC job and keep processing jobs until
1697  * get_completed_job() returns NULL
1698  *
1699  * @param qp            Queue Pair to process
1700  * @param job           JOB_AES_HMAC job
1701  *
1702  * @return
1703  * - Number of processed jobs
1704  */
1705 static unsigned
1706 handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
1707                 struct rte_crypto_op **ops, uint16_t nb_ops)
1708 {
1709         struct rte_crypto_op *op = NULL;
1710         unsigned processed_jobs = 0;
1711
1712         while (job != NULL) {
1713                 op = post_process_mb_job(qp, job);
1714
1715                 if (op) {
1716                         ops[processed_jobs++] = op;
1717                         qp->stats.dequeued_count++;
1718                 } else {
1719                         qp->stats.dequeue_err_count++;
1720                         break;
1721                 }
1722                 if (processed_jobs == nb_ops)
1723                         break;
1724
1725                 job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
1726         }
1727
1728         return processed_jobs;
1729 }
1730
1731 static inline uint32_t
1732 handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
1733 {
1734         uint32_t i;
1735
1736         for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
1737                 post_process_mb_sync_job(job);
1738
1739         return i;
1740 }
1741
1742 static inline uint32_t
1743 flush_mb_sync_mgr(MB_MGR *mb_mgr)
1744 {
1745         JOB_AES_HMAC *job;
1746
1747         job = IMB_FLUSH_JOB(mb_mgr);
1748         return handle_completed_sync_jobs(job, mb_mgr);
1749 }
1750
1751 static inline uint16_t
1752 flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
1753                 uint16_t nb_ops)
1754 {
1755         int processed_ops = 0;
1756
1757         /* Flush the remaining jobs */
1758         JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
1759
1760         if (job)
1761                 processed_ops += handle_completed_jobs(qp, job,
1762                                 &ops[processed_ops], nb_ops - processed_ops);
1763
1764         return processed_ops;
1765 }
1766
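     /**
      * Fill a job as a NULL cipher/NULL hash operation so that a job slot
      * already taken from the manager can still be submitted when parameter
      * setup fails.
      */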
1767 static inline JOB_AES_HMAC *
1768 set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
1769 {
1770         job->chain_order = HASH_CIPHER;
1771         job->cipher_mode = NULL_CIPHER;
1772         job->hash_alg = NULL_HASH;
1773         job->cipher_direction = DECRYPT;
1774
1775         /* Set user data to be crypto operation data struct */
1776         job->user_data = op;
1777
1778         return job;
1779 }
1780
1781 static uint16_t
1782 aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
1783                 uint16_t nb_ops)
1784 {
1785         struct aesni_mb_qp *qp = queue_pair;
1786
1787         struct rte_crypto_op *op;
1788         JOB_AES_HMAC *job;
1789
1790         int retval, processed_jobs = 0;
1791
1792         if (unlikely(nb_ops == 0))
1793                 return 0;
1794
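             /*
              * Work on a local copy of the rolling index into the per-queue-pair
              * temporary digest array; it is written back once the burst is done.
              */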
1795         uint8_t digest_idx = qp->digest_idx;
1796         do {
1797                 /* Get next free mb job struct from mb manager */
1798                 job = IMB_GET_NEXT_JOB(qp->mb_mgr);
1799                 if (unlikely(job == NULL)) {
1800                         /* if no free mb job structs we need to flush mb_mgr */
1801                         processed_jobs += flush_mb_mgr(qp,
1802                                         &ops[processed_jobs],
1803                                         nb_ops - processed_jobs);
1804
1805                         if (nb_ops == processed_jobs)
1806                                 break;
1807
1808                         job = IMB_GET_NEXT_JOB(qp->mb_mgr);
1809                 }
1810
1811                 /*
1812                  * Get the next operation to process from the ingress queue.
1813                  * There is no need to return the job to the MB_MGR if there
1814                  * are no more operations to process, since the MB_MGR can
1815                  * reuse that pointer in subsequent get_next calls.
1816                  */
1817                 retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
1818                 if (retval < 0)
1819                         break;
1820
1821 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1822                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1823                         retval = set_sec_mb_job_params(job, qp, op,
1824                                                 &digest_idx);
1825                 else
1826 #endif
1827                         retval = set_mb_job_params(job, qp, op, &digest_idx);
1828
1829                 if (unlikely(retval != 0)) {
1830                         qp->stats.dequeue_err_count++;
1831                         set_job_null_op(job, op);
1832                 }
1833
1834                 /* Submit job to multi-buffer for processing */
1835 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1836                 job = IMB_SUBMIT_JOB(qp->mb_mgr);
1837 #else
1838                 job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
1839 #endif
1840                 /*
1841                  * If submit returns a processed job then handle it,
1842                  * before submitting subsequent jobs
1843                  */
1844                 if (job)
1845                         processed_jobs += handle_completed_jobs(qp, job,
1846                                         &ops[processed_jobs],
1847                                         nb_ops - processed_jobs);
1848
1849         } while (processed_jobs < nb_ops);
1850
1851         qp->digest_idx = digest_idx;
1852
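             /*
              * If no job completed during submission, flush the manager so that
              * in-flight jobs can still be returned in this call.
              */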
1853         if (processed_jobs < 1)
1854                 processed_jobs += flush_mb_mgr(qp,
1855                                 &ops[processed_jobs],
1856                                 nb_ops - processed_jobs);
1857
1858         return processed_jobs;
1859 }
1860
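     /** Allocate a multi-buffer manager and initialise it for the given vector mode */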
1861 static MB_MGR *
1862 alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
1863 {
1864         MB_MGR *mb_mgr = alloc_mb_mgr(0);
1865         if (mb_mgr == NULL)
1866                 return NULL;
1867
1868         switch (vector_mode) {
1869         case RTE_AESNI_MB_SSE:
1870                 init_mb_mgr_sse(mb_mgr);
1871                 break;
1872         case RTE_AESNI_MB_AVX:
1873                 init_mb_mgr_avx(mb_mgr);
1874                 break;
1875         case RTE_AESNI_MB_AVX2:
1876                 init_mb_mgr_avx2(mb_mgr);
1877                 break;
1878         case RTE_AESNI_MB_AVX512:
1879                 init_mb_mgr_avx512(mb_mgr);
1880                 break;
1881         default:
1882                 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
1883                 free_mb_mgr(mb_mgr);
1884                 return NULL;
1885         }
1886
1887         return mb_mgr;
1888 }
1889
1890 static inline void
1891 aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
1892 {
1893         uint32_t i;
1894
1895         for (i = 0; i != vec->num; ++i)
1896                 vec->status[i] = err;
1897 }
1898
1899 static inline int
1900 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
1901 {
1902         /* no multi-seg support with current AESNI-MB PMD */
1903         if (sgl->num != 1)
1904                 return ENOTSUP;
1905         else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
1906                 return EINVAL;
1907         return 0;
1908 }
1909
1910 static inline JOB_AES_HMAC *
1911 submit_sync_job(MB_MGR *mb_mgr)
1912 {
1913 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1914         return IMB_SUBMIT_JOB(mb_mgr);
1915 #else
1916         return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
1917 #endif
1918 }
1919
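     /*
      * Copy the generated digests to the user buffers for all successfully
      * processed vectors and return the number of successful ones.
      */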
1920 static inline uint32_t
1921 generate_sync_dgst(struct rte_crypto_sym_vec *vec,
1922         const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
1923 {
1924         uint32_t i, k;
1925
1926         for (i = 0, k = 0; i != vec->num; i++) {
1927                 if (vec->status[i] == 0) {
1928                         memcpy(vec->digest[i], dgst[i], len);
1929                         k++;
1930                 }
1931         }
1932
1933         return k;
1934 }
1935
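     /*
      * Compare the generated digests against the user-provided ones, marking
      * mismatches with EBADMSG, and return the number of successful vectors.
      */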
1936 static inline uint32_t
1937 verify_sync_dgst(struct rte_crypto_sym_vec *vec,
1938         const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
1939 {
1940         uint32_t i, k;
1941
1942         for (i = 0, k = 0; i != vec->num; i++) {
1943                 if (vec->status[i] == 0) {
1944                         if (memcmp(vec->digest[i], dgst[i], len) != 0)
1945                                 vec->status[i] = EBADMSG;
1946                         else
1947                                 k++;
1948                 }
1949         }
1950
1951         return k;
1952 }
1953
1954 uint32_t
1955 aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
1956         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
1957         struct rte_crypto_sym_vec *vec)
1958 {
1959         int32_t ret;
1960         uint32_t i, j, k, len;
1961         void *buf;
1962         JOB_AES_HMAC *job;
1963         MB_MGR *mb_mgr;
1964         struct aesni_mb_private *priv;
1965         struct aesni_mb_session *s;
1966         uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
1967
1968         s = get_sym_session_private_data(sess, dev->driver_id);
1969         if (s == NULL) {
1970                 aesni_mb_fill_error_code(vec, EINVAL);
1971                 return 0;
1972         }
1973
1974         /* get per-thread MB MGR, create one if needed */
1975         mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
1976         if (mb_mgr == NULL) {
1977
1978                 priv = dev->data->dev_private;
1979                 mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
1980                 if (mb_mgr == NULL) {
1981                         aesni_mb_fill_error_code(vec, ENOMEM);
1982                         return 0;
1983                 }
1984                 RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
1985         }
1986
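             /* i indexes the input vectors, j counts submitted jobs, k counts completed jobs */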
1987         for (i = 0, j = 0, k = 0; i != vec->num; i++) {
1988
1989
1990                 ret = check_crypto_sgl(sofs, vec->sgl + i);
1991                 if (ret != 0) {
1992                         vec->status[i] = ret;
1993                         continue;
1994                 }
1995
1996                 buf = vec->sgl[i].vec[0].base;
1997                 len = vec->sgl[i].vec[0].len;
1998
1999                 job = IMB_GET_NEXT_JOB(mb_mgr);
2000                 if (job == NULL) {
2001                         k += flush_mb_sync_mgr(mb_mgr);
2002                         job = IMB_GET_NEXT_JOB(mb_mgr);
2003                         RTE_ASSERT(job != NULL);
2004                 }
2005
2006                 /* Submit job for processing */
2007                 set_cpu_mb_job_params(job, s, sofs, buf, len,
2008                         vec->iv[i], vec->aad[i], tmp_dgst[i],
2009                         &vec->status[i]);
2010                 job = submit_sync_job(mb_mgr);
2011                 j++;
2012
2013                 /* handle completed jobs */
2014                 k += handle_completed_sync_jobs(job, mb_mgr);
2015         }
2016
2017         /* flush remaining jobs */
2018         while (k != j)
2019                 k += flush_mb_sync_mgr(mb_mgr);
2020
2021         /* finish processing for successful jobs: check/update digest */
2022         if (k != 0) {
2023                 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
2024                         k = verify_sync_dgst(vec,
2025                                 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
2026                                 s->auth.req_digest_len);
2027                 else
2028                         k = generate_sync_dgst(vec,
2029                                 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
2030                                 s->auth.req_digest_len);
2031         }
2032
2033         return k;
2034 }
2035
2036 static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
2037
2038 static uint64_t
2039 vec_mode_to_flags(enum aesni_mb_vector_mode mode)
2040 {
2041         switch (mode) {
2042         case RTE_AESNI_MB_SSE:
2043                 return RTE_CRYPTODEV_FF_CPU_SSE;
2044         case RTE_AESNI_MB_AVX:
2045                 return RTE_CRYPTODEV_FF_CPU_AVX;
2046         case RTE_AESNI_MB_AVX2:
2047                 return RTE_CRYPTODEV_FF_CPU_AVX2;
2048         case RTE_AESNI_MB_AVX512:
2049                 return RTE_CRYPTODEV_FF_CPU_AVX512;
2050         default:
2051                 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
2052                 return 0;
2053         }
2054 }
2055
2056 static int
2057 cryptodev_aesni_mb_create(const char *name,
2058                         struct rte_vdev_device *vdev,
2059                         struct rte_cryptodev_pmd_init_params *init_params)
2060 {
2061         struct rte_cryptodev *dev;
2062         struct aesni_mb_private *internals;
2063         enum aesni_mb_vector_mode vector_mode;
2064         MB_MGR *mb_mgr;
2065
2066         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2067         if (dev == NULL) {
2068                 AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
2069                 return -ENODEV;
2070         }
2071
2072         /* Check CPU for supported vector instruction set */
2073         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
2074                 vector_mode = RTE_AESNI_MB_AVX512;
2075         else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
2076                 vector_mode = RTE_AESNI_MB_AVX2;
2077         else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
2078                 vector_mode = RTE_AESNI_MB_AVX;
2079         else
2080                 vector_mode = RTE_AESNI_MB_SSE;
2081
2082         dev->driver_id = cryptodev_driver_id;
2083         dev->dev_ops = rte_aesni_mb_pmd_ops;
2084
2085         /* register rx/tx burst functions for data path */
2086         dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
2087         dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
2088
2089         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2090                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2091                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
2092                         RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
2093                         RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
2094                         RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
2095
2096 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2097         struct rte_security_ctx *security_instance;
2098         security_instance = rte_malloc("aesni_mb_sec",
2099                                 sizeof(struct rte_security_ctx),
2100                                 RTE_CACHE_LINE_SIZE);
2101         if (security_instance == NULL) {
2102                 AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
2103                 rte_cryptodev_pmd_destroy(dev);
2104                 return -ENOMEM;
2105         }
2106
2107         security_instance->device = (void *)dev;
2108         security_instance->ops = rte_aesni_mb_pmd_sec_ops;
2109         security_instance->sess_cnt = 0;
2110         dev->security_ctx = security_instance;
2111         dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
2112 #endif
2113
2114         /* Check CPU for support for AES instruction set */
2115         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
2116                 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
2117         else
2118                 AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");
2119
2120         dev->feature_flags |= vec_mode_to_flags(vector_mode);
2121
2122         mb_mgr = alloc_init_mb_mgr(vector_mode);
2123         if (mb_mgr == NULL) {
2124 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2125                 rte_free(dev->security_ctx);
2126                 dev->security_ctx = NULL;
2127 #endif
2128                 rte_cryptodev_pmd_destroy(dev);
2129                 return -ENOMEM;
2130         }
2131
2132         /* Set vector instructions mode supported */
2133         internals = dev->data->dev_private;
2134
2135         internals->vector_mode = vector_mode;
2136         internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
2137         internals->mb_mgr = mb_mgr;
2138
2139         AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
2140                         imb_get_version_str());
2141         return 0;
2142 }
2143
2144 static int
2145 cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
2146 {
2147         struct rte_cryptodev_pmd_init_params init_params = {
2148                 "",
2149                 sizeof(struct aesni_mb_private),
2150                 rte_socket_id(),
2151                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2152         };
2153         const char *name, *args;
2154         int retval;
2155
2156         name = rte_vdev_device_name(vdev);
2157         if (name == NULL)
2158                 return -EINVAL;
2159
2160         args = rte_vdev_device_args(vdev);
2161
2162         retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
2163         if (retval) {
2164                 AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
2165                                 args);
2166                 return -EINVAL;
2167         }
2168
2169         return cryptodev_aesni_mb_create(name, vdev, &init_params);
2170 }
2171
2172 static int
2173 cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
2174 {
2175         struct rte_cryptodev *cryptodev;
2176         struct aesni_mb_private *internals;
2177         const char *name;
2178
2179         name = rte_vdev_device_name(vdev);
2180         if (name == NULL)
2181                 return -EINVAL;
2182
2183         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2184         if (cryptodev == NULL)
2185                 return -ENODEV;
2186
2187         internals = cryptodev->data->dev_private;
2188
2189         free_mb_mgr(internals->mb_mgr);
2190         if (RTE_PER_LCORE(sync_mb_mgr)) {
2191                 free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
2192                 RTE_PER_LCORE(sync_mb_mgr) = NULL;
2193         }
2194
2195 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2196         rte_free(cryptodev->security_ctx);
2197         cryptodev->security_ctx = NULL;
2198 #endif
2199
2200         return rte_cryptodev_pmd_destroy(cryptodev);
2201 }
2202
2203 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
2204         .probe = cryptodev_aesni_mb_probe,
2205         .remove = cryptodev_aesni_mb_remove
2206 };
2207
2208 static struct cryptodev_driver aesni_mb_crypto_drv;
2209
2210 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
2211 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
2212 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
2213         "max_nb_queue_pairs=<int> "
2214         "socket_id=<int>");
2215 RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
2216                 cryptodev_aesni_mb_pmd_drv.driver,
2217                 cryptodev_driver_id);
2218 RTE_LOG_REGISTER(aesni_mb_logtype_driver, pmd.crypto.aesni_mb, NOTICE);