dpdk.git: drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2017 Intel Corporation
3  */
4
5 #include <intel-ipsec-mb.h>
6
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_cpuflags.h>
14 #include <rte_per_lcore.h>
15 #include <rte_ether.h>
16
17 #include "aesni_mb_pmd_private.h"
18
19 #define AES_CCM_DIGEST_MIN_LEN 4
20 #define AES_CCM_DIGEST_MAX_LEN 16
21 #define HMAC_MAX_BLOCK_SIZE 128
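/* Unique id assigned to this driver at registration, used to store and
 * retrieve the PMD's private session data for each crypto session.
 */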
22 static uint8_t cryptodev_driver_id;
23
24 /*
25  * Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
26  * as we still use the JOB-based API even for synchronous processing.
27  */
28 static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
29
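/* Function pointer types matching the multi-buffer manager's one-block hash
 * and AES key expansion routines.
 */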
30 typedef void (*hash_one_block_t)(const void *data, void *digest);
31 typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
32
33 /**
34  * Calculate the authentication pre-computes
35  *
36  * @param one_block_hash        Function pointer to calculate digest on ipad/opad
37  * @param ipad                  Inner pad output byte array
38  * @param opad                  Outer pad output byte array
39  * @param hkey                  Authentication key
40  * @param hkey_len              Authentication key length
41  * @param blocksize             Block size of selected hash algo
42  */
43 static void
44 calculate_auth_precomputes(hash_one_block_t one_block_hash,
45                 uint8_t *ipad, uint8_t *opad,
46                 const uint8_t *hkey, uint16_t hkey_len,
47                 uint16_t blocksize)
48 {
49         unsigned i, length;
50
51         uint8_t ipad_buf[blocksize] __rte_aligned(16);
52         uint8_t opad_buf[blocksize] __rte_aligned(16);
53
54         /* Setup inner and outer pads */
55         memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
56         memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
57
58         /* XOR hash key with inner and outer pads */
59         length = hkey_len > blocksize ? blocksize : hkey_len;
60
61         for (i = 0; i < length; i++) {
62                 ipad_buf[i] ^= hkey[i];
63                 opad_buf[i] ^= hkey[i];
64         }
65
66         /* Compute partial hashes */
67         (*one_block_hash)(ipad_buf, ipad);
68         (*one_block_hash)(opad_buf, opad);
69
70         /* Clean up stack */
71         memset(ipad_buf, 0, blocksize);
72         memset(opad_buf, 0, blocksize);
73 }
74
75 /** Get xform chain order */
76 static enum aesni_mb_operation
77 aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
78 {
79         if (xform == NULL)
80                 return AESNI_MB_OP_NOT_SUPPORTED;
81
82         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
83                 if (xform->next == NULL)
84                         return AESNI_MB_OP_CIPHER_ONLY;
85                 if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
86                         return AESNI_MB_OP_CIPHER_HASH;
87         }
88
89         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
90                 if (xform->next == NULL)
91                         return AESNI_MB_OP_HASH_ONLY;
92                 if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
93                         return AESNI_MB_OP_HASH_CIPHER;
94         }
95 #if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
96         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
97                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
98                         /*
99                          * CCM requires hashing first and ciphering later
100                          * when encrypting
101                          */
102                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
103                                 return AESNI_MB_OP_AEAD_HASH_CIPHER;
104                         else
105                                 return AESNI_MB_OP_AEAD_CIPHER_HASH;
106                 } else {
107                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
108                                 return AESNI_MB_OP_AEAD_CIPHER_HASH;
109                         else
110                                 return AESNI_MB_OP_AEAD_HASH_CIPHER;
111                 }
112         }
113 #else
114         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
115                 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
116                                 xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
117                         if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
118                                 return AESNI_MB_OP_AEAD_CIPHER_HASH;
119                         else
120                                 return AESNI_MB_OP_AEAD_HASH_CIPHER;
121                 }
122         }
123 #endif
124
125         return AESNI_MB_OP_NOT_SUPPORTED;
126 }
127
128 /** Set session authentication parameters */
129 static int
130 aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
131                 struct aesni_mb_session *sess,
132                 const struct rte_crypto_sym_xform *xform)
133 {
134         hash_one_block_t hash_oneblock_fn = NULL;
135         unsigned int key_larger_block_size = 0;
136         uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
137         uint32_t auth_precompute = 1;
138
139         if (xform == NULL) {
140                 sess->auth.algo = NULL_HASH;
141                 return 0;
142         }
143
144         if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
145                 AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
146                 return -1;
147         }
148
149         /* Set the request digest size */
150         sess->auth.req_digest_len = xform->auth.digest_length;
151
152         /* Select auth generate/verify */
153         sess->auth.operation = xform->auth.op;
154
155         /* Set Authentication Parameters */
156         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
157                 sess->auth.algo = AES_XCBC;
158
159                 uint16_t xcbc_mac_digest_len =
160                         get_truncated_digest_byte_length(AES_XCBC);
161                 if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
162                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
163                         return -EINVAL;
164                 }
165                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
166
167                 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
168                                 sess->auth.xcbc.k1_expanded,
169                                 sess->auth.xcbc.k2, sess->auth.xcbc.k3);
170                 return 0;
171         }
172
173         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
174                 uint32_t dust[4*15];
175
176                 sess->auth.algo = AES_CMAC;
177
178                 uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
179
180                 if (sess->auth.req_digest_len > cmac_digest_len) {
181                         AESNI_MB_LOG(ERR, "Invalid digest size\n");
182                         return -EINVAL;
183                 }
184                 /*
185                  * Multi-buffer lib supports digest sizes from 4 to 16 bytes
186                  * in version 0.50, and sizes of 12 and 16 bytes
187                  * in version 0.49.
188                  * If the requested size is different, generate the full digest
189                  * (16 bytes) in a temporary location and then memcpy
190                  * the requested number of bytes.
191                  */
192                 if (sess->auth.req_digest_len < 4)
193                         sess->auth.gen_digest_len = cmac_digest_len;
194                 else
195                         sess->auth.gen_digest_len = sess->auth.req_digest_len;
196
197                 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
198                                 sess->auth.cmac.expkey, dust);
199                 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
200                                 sess->auth.cmac.skey1, sess->auth.cmac.skey2);
201                 return 0;
202         }
203
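        /* AES-GMAC is serviced through the library's GCM path, so a GCM key
         * schedule and a cipher direction are set up even though this is an
         * authentication-only transform.
         */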
204         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
205                 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
206                         sess->cipher.direction = ENCRYPT;
207                         sess->chain_order = CIPHER_HASH;
208                 } else
209                         sess->cipher.direction = DECRYPT;
210
211                 sess->auth.algo = AES_GMAC;
212                 /*
213                  * Multi-buffer lib supports 8, 12 and 16 bytes of digest.
214                  * If the requested size is different, generate the full digest
215                  * (16 bytes) in a temporary location and then memcpy
216                  * the requested number of bytes.
217                  */
218                 if (sess->auth.req_digest_len != 16 &&
219                                 sess->auth.req_digest_len != 12 &&
220                                 sess->auth.req_digest_len != 8) {
221                         sess->auth.gen_digest_len = 16;
222                 } else {
223                         sess->auth.gen_digest_len = sess->auth.req_digest_len;
224                 }
225                 sess->iv.length = xform->auth.iv.length;
226                 sess->iv.offset = xform->auth.iv.offset;
227
228                 switch (xform->auth.key.length) {
229                 case AES_128_BYTES:
230                         IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
231                                 &sess->cipher.gcm_key);
232                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
233                         break;
234                 case AES_192_BYTES:
235                         IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
236                                 &sess->cipher.gcm_key);
237                         sess->cipher.key_length_in_bytes = AES_192_BYTES;
238                         break;
239                 case AES_256_BYTES:
240                         IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
241                                 &sess->cipher.gcm_key);
242                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
243                         break;
244                 default:
245                         AESNI_MB_LOG(ERR, "Invalid authentication key length\n");
246                         return -EINVAL;
247                 }
248
249                 return 0;
250         }
251
252         switch (xform->auth.algo) {
253         case RTE_CRYPTO_AUTH_MD5_HMAC:
254                 sess->auth.algo = MD5;
255                 hash_oneblock_fn = mb_mgr->md5_one_block;
256                 break;
257         case RTE_CRYPTO_AUTH_SHA1_HMAC:
258                 sess->auth.algo = SHA1;
259                 hash_oneblock_fn = mb_mgr->sha1_one_block;
260                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
261                         IMB_SHA1(mb_mgr,
262                                 xform->auth.key.data,
263                                 xform->auth.key.length,
264                                 hashed_key);
265                         key_larger_block_size = 1;
266                 }
267                 break;
268         case RTE_CRYPTO_AUTH_SHA1:
269                 sess->auth.algo = PLAIN_SHA1;
270                 auth_precompute = 0;
271                 break;
272         case RTE_CRYPTO_AUTH_SHA224_HMAC:
273                 sess->auth.algo = SHA_224;
274                 hash_oneblock_fn = mb_mgr->sha224_one_block;
275                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
276                         IMB_SHA224(mb_mgr,
277                                 xform->auth.key.data,
278                                 xform->auth.key.length,
279                                 hashed_key);
280                         key_larger_block_size = 1;
281                 }
282                 break;
283         case RTE_CRYPTO_AUTH_SHA224:
284                 sess->auth.algo = PLAIN_SHA_224;
285                 auth_precompute = 0;
286                 break;
287         case RTE_CRYPTO_AUTH_SHA256_HMAC:
288                 sess->auth.algo = SHA_256;
289                 hash_oneblock_fn = mb_mgr->sha256_one_block;
290                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
291                         IMB_SHA256(mb_mgr,
292                                 xform->auth.key.data,
293                                 xform->auth.key.length,
294                                 hashed_key);
295                         key_larger_block_size = 1;
296                 }
297                 break;
298         case RTE_CRYPTO_AUTH_SHA256:
299                 sess->auth.algo = PLAIN_SHA_256;
300                 auth_precompute = 0;
301                 break;
302         case RTE_CRYPTO_AUTH_SHA384_HMAC:
303                 sess->auth.algo = SHA_384;
304                 hash_oneblock_fn = mb_mgr->sha384_one_block;
305                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
306                         IMB_SHA384(mb_mgr,
307                                 xform->auth.key.data,
308                                 xform->auth.key.length,
309                                 hashed_key);
310                         key_larger_block_size = 1;
311                 }
312                 break;
313         case RTE_CRYPTO_AUTH_SHA384:
314                 sess->auth.algo = PLAIN_SHA_384;
315                 auth_precompute = 0;
316                 break;
317         case RTE_CRYPTO_AUTH_SHA512_HMAC:
318                 sess->auth.algo = SHA_512;
319                 hash_oneblock_fn = mb_mgr->sha512_one_block;
320                 if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
321                         IMB_SHA512(mb_mgr,
322                                 xform->auth.key.data,
323                                 xform->auth.key.length,
324                                 hashed_key);
325                         key_larger_block_size = 1;
326                 }
327                 break;
328         case RTE_CRYPTO_AUTH_SHA512:
329                 sess->auth.algo = PLAIN_SHA_512;
330                 auth_precompute = 0;
331                 break;
332         default:
333                 AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
334                 return -ENOTSUP;
335         }
336         uint16_t trunc_digest_size =
337                         get_truncated_digest_byte_length(sess->auth.algo);
338         uint16_t full_digest_size =
339                         get_digest_byte_length(sess->auth.algo);
340
341         if (sess->auth.req_digest_len > full_digest_size ||
342                         sess->auth.req_digest_len == 0) {
343                 AESNI_MB_LOG(ERR, "Invalid digest size\n");
344                 return -EINVAL;
345         }
346
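        /* As for CMAC/GMAC above: if the requested size is neither the standard
         * truncated nor the full digest size, generate the full digest and copy
         * out only the requested number of bytes on completion.
         */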
347         if (sess->auth.req_digest_len != trunc_digest_size &&
348                         sess->auth.req_digest_len != full_digest_size)
349                 sess->auth.gen_digest_len = full_digest_size;
350         else
351                 sess->auth.gen_digest_len = sess->auth.req_digest_len;
352
353         /* Plain SHA does not require key precomputation */
354         if (auth_precompute == 0)
355                 return 0;
356
357         /* Calculate Authentication precomputes */
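        /* Keys longer than the hash block size were already hashed down above
         * (per RFC 2104); hashed_key is zero padded up to the block size.
         */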
358         if (key_larger_block_size) {
359                 calculate_auth_precomputes(hash_oneblock_fn,
360                         sess->auth.pads.inner, sess->auth.pads.outer,
361                         hashed_key,
362                         xform->auth.key.length,
363                         get_auth_algo_blocksize(sess->auth.algo));
364         } else {
365                 calculate_auth_precomputes(hash_oneblock_fn,
366                         sess->auth.pads.inner, sess->auth.pads.outer,
367                         xform->auth.key.data,
368                         xform->auth.key.length,
369                         get_auth_algo_blocksize(sess->auth.algo));
370         }
371
372         return 0;
373 }
374
375 /** Set session cipher parameters */
376 static int
377 aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
378                 struct aesni_mb_session *sess,
379                 const struct rte_crypto_sym_xform *xform)
380 {
381         uint8_t is_aes = 0;
382         uint8_t is_3DES = 0;
383         uint8_t is_docsis = 0;
384
385         if (xform == NULL) {
386                 sess->cipher.mode = NULL_CIPHER;
387                 return 0;
388         }
389
390         if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
391                 AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
392                 return -EINVAL;
393         }
394
395         /* Select cipher direction */
396         switch (xform->cipher.op) {
397         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
398                 sess->cipher.direction = ENCRYPT;
399                 break;
400         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
401                 sess->cipher.direction = DECRYPT;
402                 break;
403         default:
404                 AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
405                 return -EINVAL;
406         }
407
408         /* Select cipher mode */
409         switch (xform->cipher.algo) {
410         case RTE_CRYPTO_CIPHER_AES_CBC:
411                 sess->cipher.mode = CBC;
412                 is_aes = 1;
413                 break;
414         case RTE_CRYPTO_CIPHER_AES_CTR:
415                 sess->cipher.mode = CNTR;
416                 is_aes = 1;
417                 break;
418         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
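                /* DOCSIS BPI: AES-CBC for whole blocks, with an AES-CFB128
                 * termination for any trailing partial block.
                 */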
419                 sess->cipher.mode = DOCSIS_SEC_BPI;
420                 is_docsis = 1;
421                 break;
422         case RTE_CRYPTO_CIPHER_DES_CBC:
423                 sess->cipher.mode = DES;
424                 break;
425         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
426                 sess->cipher.mode = DOCSIS_DES;
427                 break;
428         case RTE_CRYPTO_CIPHER_3DES_CBC:
429                 sess->cipher.mode = DES3;
430                 is_3DES = 1;
431                 break;
432 #if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
433         case RTE_CRYPTO_CIPHER_AES_ECB:
434                 sess->cipher.mode = ECB;
435                 is_aes = 1;
436                 break;
437 #endif
438         default:
439                 AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
440                 return -ENOTSUP;
441         }
442
443         /* Set IV parameters */
444         sess->iv.offset = xform->cipher.iv.offset;
445         sess->iv.length = xform->cipher.iv.length;
446
447         /* Check key length and choose key expansion function for AES */
448         if (is_aes) {
449                 switch (xform->cipher.key.length) {
450                 case AES_128_BYTES:
451                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
452                         IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
453                                         sess->cipher.expanded_aes_keys.encode,
454                                         sess->cipher.expanded_aes_keys.decode);
455                         break;
456                 case AES_192_BYTES:
457                         sess->cipher.key_length_in_bytes = AES_192_BYTES;
458                         IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
459                                         sess->cipher.expanded_aes_keys.encode,
460                                         sess->cipher.expanded_aes_keys.decode);
461                         break;
462                 case AES_256_BYTES:
463                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
464                         IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
465                                         sess->cipher.expanded_aes_keys.encode,
466                                         sess->cipher.expanded_aes_keys.decode);
467                         break;
468                 default:
469                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
470                         return -EINVAL;
471                 }
472         } else if (is_docsis) {
473                 switch (xform->cipher.key.length) {
474                 case AES_128_BYTES:
475                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
476                         IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
477                                         sess->cipher.expanded_aes_keys.encode,
478                                         sess->cipher.expanded_aes_keys.decode);
479                         break;
480 #if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
481                 case AES_256_BYTES:
482                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
483                         IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
484                                         sess->cipher.expanded_aes_keys.encode,
485                                         sess->cipher.expanded_aes_keys.decode);
486                         break;
487 #endif
488                 default:
489                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
490                         return -EINVAL;
491                 }
492         } else if (is_3DES) {
493                 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
494                                 sess->cipher.exp_3des_keys.key[1],
495                                 sess->cipher.exp_3des_keys.key[2]};
496
497                 switch (xform->cipher.key.length) {
498                 case  24:
499                         IMB_DES_KEYSCHED(mb_mgr, keys[0],
500                                         xform->cipher.key.data);
501                         IMB_DES_KEYSCHED(mb_mgr, keys[1],
502                                         xform->cipher.key.data + 8);
503                         IMB_DES_KEYSCHED(mb_mgr, keys[2],
504                                         xform->cipher.key.data + 16);
505
506                         /* Initialize keys - 24 bytes: [K1-K2-K3] */
507                         sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
508                         sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
509                         sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
510                         break;
511                 case 16:
512                         IMB_DES_KEYSCHED(mb_mgr, keys[0],
513                                         xform->cipher.key.data);
514                         IMB_DES_KEYSCHED(mb_mgr, keys[1],
515                                         xform->cipher.key.data + 8);
516                         /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
517                         sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
518                         sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
519                         sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
520                         break;
521                 case 8:
522                         IMB_DES_KEYSCHED(mb_mgr, keys[0],
523                                         xform->cipher.key.data);
524
525                         /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
526                         sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
527                         sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
528                         sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
529                         break;
530                 default:
531                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
532                         return -EINVAL;
533                 }
534
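                /* Three 8-byte key schedules are always passed to the library,
                 * so the key length is reported as 24 bytes regardless of the
                 * supplied 3DES key size.
                 */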
535                 sess->cipher.key_length_in_bytes = 24;
536         } else {
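                /* Remaining modes are single DES (DES CBC / DOCSIS DES): one
                 * 8-byte key schedule is expanded for both directions.
                 */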
537                 if (xform->cipher.key.length != 8) {
538                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
539                         return -EINVAL;
540                 }
541                 sess->cipher.key_length_in_bytes = 8;
542
543                 IMB_DES_KEYSCHED(mb_mgr,
544                         (uint64_t *)sess->cipher.expanded_aes_keys.encode,
545                                 xform->cipher.key.data);
546                 IMB_DES_KEYSCHED(mb_mgr,
547                         (uint64_t *)sess->cipher.expanded_aes_keys.decode,
548                                 xform->cipher.key.data);
549         }
550
551         return 0;
552 }
553
554 static int
555 aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
556                 struct aesni_mb_session *sess,
557                 const struct rte_crypto_sym_xform *xform)
558 {
559         switch (xform->aead.op) {
560         case RTE_CRYPTO_AEAD_OP_ENCRYPT:
561                 sess->cipher.direction = ENCRYPT;
562                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
563                 break;
564         case RTE_CRYPTO_AEAD_OP_DECRYPT:
565                 sess->cipher.direction = DECRYPT;
566                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
567                 break;
568         default:
569                 AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
570                 return -EINVAL;
571         }
572
573         switch (xform->aead.algo) {
574         case RTE_CRYPTO_AEAD_AES_CCM:
575                 sess->cipher.mode = CCM;
576                 sess->auth.algo = AES_CCM;
577
578                 /* Check key length and choose key expansion function for AES */
579                 switch (xform->aead.key.length) {
580                 case AES_128_BYTES:
581                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
582                         IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
583                                         sess->cipher.expanded_aes_keys.encode,
584                                         sess->cipher.expanded_aes_keys.decode);
585                         break;
586                 default:
587                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
588                         return -EINVAL;
589                 }
590
591                 break;
592
593         case RTE_CRYPTO_AEAD_AES_GCM:
594                 sess->cipher.mode = GCM;
595                 sess->auth.algo = AES_GMAC;
596
597                 switch (xform->aead.key.length) {
598                 case AES_128_BYTES:
599                         sess->cipher.key_length_in_bytes = AES_128_BYTES;
600                         IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
601                                 &sess->cipher.gcm_key);
602                         break;
603                 case AES_192_BYTES:
604                         sess->cipher.key_length_in_bytes = AES_192_BYTES;
605                         IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
606                                 &sess->cipher.gcm_key);
607                         break;
608                 case AES_256_BYTES:
609                         sess->cipher.key_length_in_bytes = AES_256_BYTES;
610                         IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
611                                 &sess->cipher.gcm_key);
612                         break;
613                 default:
614                         AESNI_MB_LOG(ERR, "Invalid cipher key length");
615                         return -EINVAL;
616                 }
617
618                 break;
619
620         default:
621                 AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
622                 return -ENOTSUP;
623         }
624
625         /* Set IV parameters */
626         sess->iv.offset = xform->aead.iv.offset;
627         sess->iv.length = xform->aead.iv.length;
628
629         sess->auth.req_digest_len = xform->aead.digest_length;
630         /* CCM digest sizes must be between 4 and 16 bytes and an even number */
631         if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
632                         sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
633                         (sess->auth.req_digest_len & 1) == 1) {
634                 AESNI_MB_LOG(ERR, "Invalid digest size\n");
635                 return -EINVAL;
636         }
637         sess->auth.gen_digest_len = sess->auth.req_digest_len;
638
639         return 0;
640 }
641
642 /** Parse crypto xform chain and set private session parameters */
643 int
644 aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
645                 struct aesni_mb_session *sess,
646                 const struct rte_crypto_sym_xform *xform)
647 {
648         const struct rte_crypto_sym_xform *auth_xform = NULL;
649         const struct rte_crypto_sym_xform *cipher_xform = NULL;
650         const struct rte_crypto_sym_xform *aead_xform = NULL;
651         int ret;
652
653         /* Select Crypto operation - hash then cipher / cipher then hash */
654         switch (aesni_mb_get_chain_order(xform)) {
655         case AESNI_MB_OP_HASH_CIPHER:
656                 sess->chain_order = HASH_CIPHER;
657                 auth_xform = xform;
658                 cipher_xform = xform->next;
659                 break;
660         case AESNI_MB_OP_CIPHER_HASH:
661                 sess->chain_order = CIPHER_HASH;
662                 auth_xform = xform->next;
663                 cipher_xform = xform;
664                 break;
665         case AESNI_MB_OP_HASH_ONLY:
666                 sess->chain_order = HASH_CIPHER;
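                /* Auth only: the cipher stage is configured as a NULL cipher
                 * below, since cipher_xform is NULL.
                 */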
667                 auth_xform = xform;
668                 cipher_xform = NULL;
669                 break;
670         case AESNI_MB_OP_CIPHER_ONLY:
671                 /*
672                  * The multi-buffer library operates in only two modes,
673                  * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
674                  * chain order depends on cipher operation: encryption is always
675                  * the first operation and decryption the last one.
676                  */
677                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
678                         sess->chain_order = CIPHER_HASH;
679                 else
680                         sess->chain_order = HASH_CIPHER;
681                 auth_xform = NULL;
682                 cipher_xform = xform;
683                 break;
684         case AESNI_MB_OP_AEAD_CIPHER_HASH:
685                 sess->chain_order = CIPHER_HASH;
686                 sess->aead.aad_len = xform->aead.aad_length;
687                 aead_xform = xform;
688                 break;
689         case AESNI_MB_OP_AEAD_HASH_CIPHER:
690                 sess->chain_order = HASH_CIPHER;
691                 sess->aead.aad_len = xform->aead.aad_length;
692                 aead_xform = xform;
693                 break;
694         case AESNI_MB_OP_NOT_SUPPORTED:
695         default:
696                 AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
697                 return -ENOTSUP;
698         }
699
700         /* Default IV length = 0 */
701         sess->iv.length = 0;
702
703         ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
704         if (ret != 0) {
705                 AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
706                 return ret;
707         }
708
709         ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
710                         cipher_xform);
711         if (ret != 0) {
712                 AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
713                 return ret;
714         }
715
716         if (aead_xform) {
717                 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
718                                 aead_xform);
719                 if (ret != 0) {
720                         AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
721                         return ret;
722                 }
723         }
724
725         return 0;
726 }
727
728 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
729 /** Check that the DOCSIS security session configuration is valid */
730 static int
731 check_docsis_sec_session(struct rte_security_session_conf *conf)
732 {
733         struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
734         struct rte_security_docsis_xform *docsis = &conf->docsis;
735
736         /* Downlink: CRC generate -> Cipher encrypt */
737         if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
738
739                 if (crypto_sym != NULL &&
740                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
741                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
742                     crypto_sym->cipher.algo ==
743                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
744                     (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
745                      crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
746                     crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
747                     crypto_sym->next == NULL) {
748                         return 0;
749                 }
750         /* Uplink: Cipher decrypt -> CRC verify */
751         } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
752
753                 if (crypto_sym != NULL &&
754                     crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
755                     crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
756                     crypto_sym->cipher.algo ==
757                                         RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
758                     (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
759                      crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
760                     crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
761                     crypto_sym->next == NULL) {
762                         return 0;
763                 }
764         }
765
766         return -EINVAL;
767 }
768
769 /** Set DOCSIS security session auth (CRC) parameters */
770 static int
771 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
772                 struct rte_security_docsis_xform *xform)
773 {
774         if (xform == NULL) {
775                 AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");
776                 return -EINVAL;
777         }
778
779         /* Select CRC generate/verify */
780         if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
781                 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
782                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
783         } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
784                 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
785                 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
786         } else {
787                 AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");
788                 return -ENOTSUP;
789         }
790
791         sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
792         sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
793
794         return 0;
795 }
796
797 /**
798  * Parse DOCSIS security session configuration and set private session
799  * parameters
800  */
801 int
802 aesni_mb_set_docsis_sec_session_parameters(
803                 __rte_unused struct rte_cryptodev *dev,
804                 struct rte_security_session_conf *conf,
805                 void *sess)
806 {
807         struct rte_security_docsis_xform *docsis_xform;
808         struct rte_crypto_sym_xform *cipher_xform;
809         struct aesni_mb_session *aesni_sess = sess;
810         struct aesni_mb_private *internals = dev->data->dev_private;
811         int ret;
812
813         ret = check_docsis_sec_session(conf);
814         if (ret) {
815                 AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
816                 return ret;
817         }
818
819         switch (conf->docsis.direction) {
820         case RTE_SECURITY_DOCSIS_UPLINK:
821                 aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
822                 docsis_xform = &conf->docsis;
823                 cipher_xform = conf->crypto_xform;
824                 break;
825         case RTE_SECURITY_DOCSIS_DOWNLINK:
826                 aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
827                 cipher_xform = conf->crypto_xform;
828                 docsis_xform = &conf->docsis;
829                 break;
830         default:
831                 return -EINVAL;
832         }
833
834         /* Default IV length = 0 */
835         aesni_sess->iv.length = 0;
836
837         ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
838                         docsis_xform);
839         if (ret != 0) {
840                 AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
841                 return -EINVAL;
842         }
843
844         ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
845                         aesni_sess, cipher_xform);
846
847         if (ret != 0) {
848                 AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
849                 return -EINVAL;
850         }
851
852         return 0;
853 }
854 #endif
855
856 /**
857  * Burst enqueue: place crypto operations on the ingress queue for processing.
858  *
859  * @param __qp         Queue Pair to process
860  * @param ops          Crypto operations for processing
861  * @param nb_ops       Number of crypto operations for processing
862  *
863  * @return
864  * - Number of crypto operations enqueued
865  */
866 static uint16_t
867 aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
868                 uint16_t nb_ops)
869 {
870         struct aesni_mb_qp *qp = __qp;
871
872         unsigned int nb_enqueued;
873
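        /* Ops are only staged on the ingress ring here; jobs are built and
         * submitted to the multi-buffer manager on the dequeue path.
         */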
874         nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
875                         (void **)ops, nb_ops, NULL);
876
877         qp->stats.enqueued_count += nb_enqueued;
878
879         return nb_enqueued;
880 }
881
882 /** Get multi buffer session */
883 static inline struct aesni_mb_session *
884 get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
885 {
886         struct aesni_mb_session *sess = NULL;
887
888         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
889                 if (likely(op->sym->session != NULL))
890                         sess = (struct aesni_mb_session *)
891                                         get_sym_session_private_data(
892                                         op->sym->session,
893                                         cryptodev_driver_id);
894 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
895         } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
896                 if (likely(op->sym->sec_session != NULL))
897                         sess = (struct aesni_mb_session *)
898                                         get_sec_session_private_data(
899                                                 op->sym->sec_session);
900 #endif
901         } else {
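                /* Sessionless operation: allocate a session object and its
                 * private data from the queue pair's mempools and initialise
                 * them from the op's xform chain.
                 */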
902                 void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
903                 void *_sess_private_data = NULL;
904
905                 if (_sess == NULL)
906                         return NULL;
907
908                 if (rte_mempool_get(qp->sess_mp_priv,
909                                 (void **)&_sess_private_data))
910                         return NULL;
911
912                 sess = (struct aesni_mb_session *)_sess_private_data;
913
914                 if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
915                                 sess, op->sym->xform) != 0)) {
916                         rte_mempool_put(qp->sess_mp, _sess);
917                         rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
918                         sess = NULL;
919                 }
920                 op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
921                 set_sym_session_private_data(op->sym->session,
922                                 cryptodev_driver_id, _sess_private_data);
923         }
924
925         if (unlikely(sess == NULL))
926                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
927
928         return sess;
929 }
930
931 static inline uint64_t
932 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
933                 uint32_t oop)
934 {
935         struct rte_mbuf *m_src, *m_dst;
936         uint8_t *p_src, *p_dst;
937         uintptr_t u_src, u_dst;
938         uint32_t cipher_end, auth_end;
939
940         /* Only cipher then hash needs special calculation. */
941         if (!oop || session->chain_order != CIPHER_HASH)
942                 return op->sym->auth.data.offset;
943
944         m_src = op->sym->m_src;
945         m_dst = op->sym->m_dst;
946
947         p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
948         p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
949         u_src = (uintptr_t)p_src;
950         u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
951
952         /**
953          * Copy the content between the cipher offset and the auth offset to
954          * generate the correct digest.
955          */
956         if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
957                 memcpy(p_dst + op->sym->auth.data.offset,
958                                 p_src + op->sym->auth.data.offset,
959                                 op->sym->cipher.data.offset -
960                                 op->sym->auth.data.offset);
961
962         /**
963          * Copy the content between (cipher offset + length) and (auth offset +
964          * length) to generate the correct digest.
965          */
966         cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
967         auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
968         if (cipher_end < auth_end)
969                 memcpy(p_dst + cipher_end, p_src + cipher_end,
970                                 auth_end - cipher_end);
971
972         /**
973          * intel-ipsec-mb only supports positive offsets, so express the
974          * src-to-dst distance as an unsigned value that wraps when dst < src.
975          */
976
977         return u_src < u_dst ? (u_dst - u_src) :
978                         (UINT64_MAX - u_src + u_dst + 1);
979 }
980
981 static inline void
982 set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
983                 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
984                 void *iv, void *aad, void *digest, void *udata)
985 {
986         /* Set crypto operation */
987         job->chain_order = session->chain_order;
988
989         /* Set cipher parameters */
990         job->cipher_direction = session->cipher.direction;
991         job->cipher_mode = session->cipher.mode;
992
993         job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
994
995         /* Set authentication parameters */
996         job->hash_alg = session->auth.algo;
997         job->iv = iv;
998
999         switch (job->hash_alg) {
1000         case AES_XCBC:
1001                 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1002                 job->u.XCBC._k2 = session->auth.xcbc.k2;
1003                 job->u.XCBC._k3 = session->auth.xcbc.k3;
1004
1005                 job->aes_enc_key_expanded =
1006                                 session->cipher.expanded_aes_keys.encode;
1007                 job->aes_dec_key_expanded =
1008                                 session->cipher.expanded_aes_keys.decode;
1009                 break;
1010
1011         case AES_CCM:
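                /* The cryptodev API reserves 18 bytes at the start of the CCM
                 * AAD buffer (room for the B0 block and the AAD length
                 * encoding); the library expects the raw AAD, so skip them.
                 */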
1012                 job->u.CCM.aad = (uint8_t *)aad + 18;
1013                 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1014                 job->aes_enc_key_expanded =
1015                                 session->cipher.expanded_aes_keys.encode;
1016                 job->aes_dec_key_expanded =
1017                                 session->cipher.expanded_aes_keys.decode;
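                /* The first byte of the CCM IV field is reserved for the
                 * counter-block flags; the nonce itself starts at offset 1.
                 */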
1018                 job->iv++;
1019                 break;
1020
1021         case AES_CMAC:
1022                 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1023                 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1024                 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1025                 job->aes_enc_key_expanded =
1026                                 session->cipher.expanded_aes_keys.encode;
1027                 job->aes_dec_key_expanded =
1028                                 session->cipher.expanded_aes_keys.decode;
1029                 break;
1030
1031         case AES_GMAC:
1032                 if (session->cipher.mode == GCM) {
1033                         job->u.GCM.aad = aad;
1034                         job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
1035                 } else {
1036                         /* For GMAC */
1037                         job->u.GCM.aad = buf;
1038                         job->u.GCM.aad_len_in_bytes = len;
1039                         job->cipher_mode = GCM;
1040                 }
1041                 job->aes_enc_key_expanded = &session->cipher.gcm_key;
1042                 job->aes_dec_key_expanded = &session->cipher.gcm_key;
1043                 break;
1044
1045         default:
1046                 job->u.HMAC._hashed_auth_key_xor_ipad =
1047                                 session->auth.pads.inner;
1048                 job->u.HMAC._hashed_auth_key_xor_opad =
1049                                 session->auth.pads.outer;
1050
1051                 if (job->cipher_mode == DES3) {
1052                         job->aes_enc_key_expanded =
1053                                 session->cipher.exp_3des_keys.ks_ptr;
1054                         job->aes_dec_key_expanded =
1055                                 session->cipher.exp_3des_keys.ks_ptr;
1056                 } else {
1057                         job->aes_enc_key_expanded =
1058                                 session->cipher.expanded_aes_keys.encode;
1059                         job->aes_dec_key_expanded =
1060                                 session->cipher.expanded_aes_keys.decode;
1061                 }
1062         }
1063
1064         /*
1065          * The multi-buffer library currently only supports returning a truncated
1066          * digest length, as specified in the relevant IPsec RFCs.
1067          */
1068
1069         /* Set digest location and length */
1070         job->auth_tag_output = digest;
1071         job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1072
1073         /* Set IV parameters */
1074         job->iv_len_in_bytes = session->iv.length;
1075
1076         /* Data Parameters */
1077         job->src = buf;
1078         job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
1079         job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
1080         job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
1081         if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
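                /* GMAC only: the whole buffer was passed as AAD above, so no
                 * message data is ciphered or hashed.
                 */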
1082                 job->msg_len_to_hash_in_bytes = 0;
1083                 job->msg_len_to_cipher_in_bytes = 0;
1084         } else {
1085                 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
1086                         sofs.ofs.auth.tail;
1087                 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
1088                         sofs.ofs.cipher.tail;
1089         }
1090
1091         job->user_data = udata;
1092 }
1093
1094 /**
1095  * Process a crypto operation and complete a JOB_AES_HMAC job structure for
1096  * submission to the multi buffer library for processing.
1097  *
1098  * @param       job             JOB_AES_HMAC structure to fill
1099  * @param       qp              queue pair
1100  * @param       op              crypto operation to process
1101  * @param       digest_idx      index of the next temporary digest to use
1102  * @return
1103  * - 0 on success, with the job structure completed
1104  * - -1 if the job structure could not be completed
1105  */
1106 static inline int
1107 set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
1108                 struct rte_crypto_op *op, uint8_t *digest_idx)
1109 {
1110         struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
1111         struct aesni_mb_session *session;
1112         uint32_t m_offset, oop;
1113
1114         session = get_session(qp, op);
1115         if (session == NULL) {
1116                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1117                 return -1;
1118         }
1119
1120         /* Set crypto operation */
1121         job->chain_order = session->chain_order;
1122
1123         /* Set cipher parameters */
1124         job->cipher_direction = session->cipher.direction;
1125         job->cipher_mode = session->cipher.mode;
1126
1127         job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
1128
1129         /* Set authentication parameters */
1130         job->hash_alg = session->auth.algo;
1131
1132         switch (job->hash_alg) {
1133         case AES_XCBC:
1134                 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1135                 job->u.XCBC._k2 = session->auth.xcbc.k2;
1136                 job->u.XCBC._k3 = session->auth.xcbc.k3;
1137
1138                 job->aes_enc_key_expanded =
1139                                 session->cipher.expanded_aes_keys.encode;
1140                 job->aes_dec_key_expanded =
1141                                 session->cipher.expanded_aes_keys.decode;
1142                 break;
1143
1144         case AES_CCM:
1145                 job->u.CCM.aad = op->sym->aead.aad.data + 18;
1146                 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1147                 job->aes_enc_key_expanded =
1148                                 session->cipher.expanded_aes_keys.encode;
1149                 job->aes_dec_key_expanded =
1150                                 session->cipher.expanded_aes_keys.decode;
1151                 break;
1152
1153         case AES_CMAC:
1154                 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1155                 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1156                 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1157                 job->aes_enc_key_expanded =
1158                                 session->cipher.expanded_aes_keys.encode;
1159                 job->aes_dec_key_expanded =
1160                                 session->cipher.expanded_aes_keys.decode;
1161                 break;
1162
1163         case AES_GMAC:
1164                 if (session->cipher.mode == GCM) {
1165                         job->u.GCM.aad = op->sym->aead.aad.data;
1166                         job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
1167                 } else {
1168                         /* For GMAC */
1169                         job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
1170                                         uint8_t *, op->sym->auth.data.offset);
1171                         job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
1172                         job->cipher_mode = GCM;
1173                 }
1174                 job->aes_enc_key_expanded = &session->cipher.gcm_key;
1175                 job->aes_dec_key_expanded = &session->cipher.gcm_key;
1176                 break;
1177
1178         default:
1179                 job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
1180                 job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
1181
1182                 if (job->cipher_mode == DES3) {
1183                         job->aes_enc_key_expanded =
1184                                 session->cipher.exp_3des_keys.ks_ptr;
1185                         job->aes_dec_key_expanded =
1186                                 session->cipher.exp_3des_keys.ks_ptr;
1187                 } else {
1188                         job->aes_enc_key_expanded =
1189                                 session->cipher.expanded_aes_keys.encode;
1190                         job->aes_dec_key_expanded =
1191                                 session->cipher.expanded_aes_keys.decode;
1192                 }
1193         }
1194
1195         if (!op->sym->m_dst) {
1196                 /* in-place operation */
1197                 m_dst = m_src;
1198                 oop = 0;
1199         } else if (op->sym->m_dst == op->sym->m_src) {
1200                 /* in-place operation */
1201                 m_dst = m_src;
1202                 oop = 0;
1203         } else {
1204                 /* out-of-place operation */
1205                 m_dst = op->sym->m_dst;
1206                 oop = 1;
1207         }
1208
1209         if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
1210                         session->cipher.mode == GCM))
1211                 m_offset = op->sym->aead.data.offset;
1212         else
1213                 m_offset = op->sym->cipher.data.offset;
1214
1215         /* Set digest output location */
1216         if (job->hash_alg != NULL_HASH &&
1217                         session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
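                /* For verification, generate the digest into a per-qp scratch
                 * buffer; it is checked against the op's digest when the
                 * completed job is post-processed.
                 */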
1218                 job->auth_tag_output = qp->temp_digests[*digest_idx];
1219                 *digest_idx = (*digest_idx + 1) % MAX_JOBS;
1220         } else {
1221                 if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
1222                                 session->cipher.mode == GCM))
1223                         job->auth_tag_output = op->sym->aead.digest.data;
1224                 else
1225                         job->auth_tag_output = op->sym->auth.digest.data;
1226
1227                 if (session->auth.req_digest_len != session->auth.gen_digest_len) {
1228                         job->auth_tag_output = qp->temp_digests[*digest_idx];
1229                         *digest_idx = (*digest_idx + 1) % MAX_JOBS;
1230                 }
1231         }
1232         /*
1233          * The multi-buffer library currently only supports returning a truncated
1234          * digest length, as specified in the relevant IPsec RFCs.
1235          */
1236
1237         /* Set digest length */
1238         job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1239
1240         /* Set IV parameters */
1241         job->iv_len_in_bytes = session->iv.length;
1242
1243         /* Data Parameters */
1244         job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1245         job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
1246
1247         switch (job->hash_alg) {
1248         case AES_CCM:
1249                 job->cipher_start_src_offset_in_bytes =
1250                                 op->sym->aead.data.offset;
1251                 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
1252                 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
1253                 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
1254
1255                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1256                         session->iv.offset + 1);
1257                 break;
1258
1259         case AES_GMAC:
1260                 if (session->cipher.mode == GCM) {
1261                         job->cipher_start_src_offset_in_bytes =
1262                                         op->sym->aead.data.offset;
1263                         job->hash_start_src_offset_in_bytes =
1264                                         op->sym->aead.data.offset;
1265                         job->msg_len_to_cipher_in_bytes =
1266                                         op->sym->aead.data.length;
1267                         job->msg_len_to_hash_in_bytes =
1268                                         op->sym->aead.data.length;
1269                 } else {
1270                         job->cipher_start_src_offset_in_bytes =
1271                                         op->sym->auth.data.offset;
1272                         job->hash_start_src_offset_in_bytes =
1273                                         op->sym->auth.data.offset;
1274                         job->msg_len_to_cipher_in_bytes = 0;
1275                         job->msg_len_to_hash_in_bytes = 0;
1276                 }
1277
1278                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1279                                 session->iv.offset);
1280                 break;
1281
1282         default:
1283                 job->cipher_start_src_offset_in_bytes =
1284                                 op->sym->cipher.data.offset;
1285                 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
1286
1287                 job->hash_start_src_offset_in_bytes = auth_start_offset(op,
1288                                 session, oop);
1289                 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
1290
1291                 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1292                         session->iv.offset);
1293         }
1294
1295         /* Set user data to be crypto operation data struct */
1296         job->user_data = op;
1297
1298         return 0;
1299 }
1300
1301 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1302 /**
1303  * Process a crypto operation containing a security op and complete a
1304  * JOB_AES_HMAC job structure for submission to the multi-buffer library
1305  * for processing.
1306  */
1307 static inline int
1308 set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
1309                 struct rte_crypto_op *op, uint8_t *digest_idx)
1310 {
1311         struct rte_mbuf *m_src, *m_dst;
1312         struct rte_crypto_sym_op *sym;
1313         struct aesni_mb_session *session;
1314
1315         session = get_session(qp, op);
1316         if (unlikely(session == NULL)) {
1317                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1318                 return -1;
1319         }
1320
1321         /* Only DOCSIS protocol operations are supported for now */
1322         if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
1323                         session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
1324                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1325                 return -1;
1326         }
1327
1328         sym = op->sym;
1329         m_src = sym->m_src;
1330
1331         if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
1332                 /* in-place operation */
1333                 m_dst = m_src;
1334         } else {
1335                 /* out-of-place operation not supported */
1336                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1337                 return -ENOTSUP;
1338         }
1339
1340         /* Set crypto operation */
1341         job->chain_order = session->chain_order;
1342
1343         /* Set cipher parameters */
1344         job->cipher_direction = session->cipher.direction;
1345         job->cipher_mode = session->cipher.mode;
1346
1347         job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
1348         job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
1349         job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
1350
1351         /* Set IV parameters */
1352         job->iv_len_in_bytes = session->iv.length;
1353         job->iv = (uint8_t *)op + session->iv.offset;
1354
1355         /* Set authentication parameters */
1356         job->hash_alg = session->auth.algo;
1357
1358         /* Set digest output location */
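        /*
         * The generated CRC always lands in a per-qp scratch buffer; for
         * verify operations it is later compared against the CRC carried at
         * the end of the frame (see verify_docsis_sec_crc()).
         */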
1359         job->auth_tag_output = qp->temp_digests[*digest_idx];
1360         *digest_idx = (*digest_idx + 1) % MAX_JOBS;
1361
1362         /* Set digest length */
1363         job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1364
1365         /* Set data parameters */
1366         job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1367         job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
1368                                                 sym->cipher.data.offset);
1369
1370         job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
1371         job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;
1372
1373         job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
1374         job->msg_len_to_hash_in_bytes = sym->auth.data.length;
1375
1376         job->user_data = op;
1377
1378         return 0;
1379 }
1380
1381 static inline void
1382 verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
1383 {
1384         uint16_t crc_offset;
1385         uint8_t *crc;
1386
1387         if (!job->msg_len_to_hash_in_bytes)
1388                 return;
1389
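        /*
         * job->dst points at the cipher start offset, so the CRC location is
         * the end of the hashed region expressed relative to that offset.
         */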
1390         crc_offset = job->hash_start_src_offset_in_bytes +
1391                         job->msg_len_to_hash_in_bytes -
1392                         job->cipher_start_src_offset_in_bytes;
1393         crc = job->dst + crc_offset;
1394
1395         /* Verify CRC (at the end of the message) */
1396         if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
1397                 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1398 }
1399 #endif
1400
1401 static inline void
1402 verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
1403 {
1404         /* Verify digest if required */
1405         if (memcmp(job->auth_tag_output, digest, len) != 0)
1406                 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1407 }
1408
1409 static inline void
1410 generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
1411                 struct aesni_mb_session *sess)
1412 {
1413         /* No extra copy needed */
1414         if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
1415                 return;
1416
1417         /*
1418          * This can only happen for HMAC, so only the digest of
1419          * authentication algorithms needs to be copied here
1420          */
1421         memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1422                         sess->auth.req_digest_len);
1423 }
1424
1425 /**
1426  * Process a completed job and return the crypto operation it processed
1427  *
1428  * @param qp            Queue Pair to process
1429  * @param job           JOB_AES_HMAC job to process
1430  *
1431  * @return
1432  * - Returns the processed crypto operation.
1433  * - Returns NULL on an invalid job.
1434  */
1435 static inline struct rte_crypto_op *
1436 post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
1437 {
1438         struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
1439         struct aesni_mb_session *sess = NULL;
1440
1441 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1442         uint8_t is_docsis_sec = 0;
1443
1444         if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1445                 /*
1446                  * Assume at this point that any security-type op
1447                  * is a DOCSIS op
1448                  */
1449                 is_docsis_sec = 1;
1450                 sess = get_sec_session_private_data(op->sym->sec_session);
1451         } else
1452 #endif
1453         {
1454                 sess = get_sym_session_private_data(op->sym->session,
1455                                                 cryptodev_driver_id);
1456         }
1457
1458         if (unlikely(sess == NULL)) {
1459                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1460                 return op;
1461         }
1462
1463         if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
1464                 switch (job->status) {
1465                 case STS_COMPLETED:
1466                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1467
1468                         if (job->hash_alg == NULL_HASH)
1469                                 break;
1470
1471                         if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1472                                 if (job->hash_alg == AES_CCM ||
1473                                         (job->hash_alg == AES_GMAC &&
1474                                                 sess->cipher.mode == GCM))
1475                                         verify_digest(job,
1476                                                 op->sym->aead.digest.data,
1477                                                 sess->auth.req_digest_len,
1478                                                 &op->status);
1479 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1480                                 else if (is_docsis_sec)
1481                                         verify_docsis_sec_crc(job,
1482                                                 &op->status);
1483 #endif
1484                                 else
1485                                         verify_digest(job,
1486                                                 op->sym->auth.digest.data,
1487                                                 sess->auth.req_digest_len,
1488                                                 &op->status);
1489                         } else
1490                                 generate_digest(job, op, sess);
1491                         break;
1492                 default:
1493                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1494                 }
1495         }
1496
1497         /* Free session if a session-less crypto op */
1498         if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1499                 memset(sess, 0, sizeof(struct aesni_mb_session));
1500                 memset(op->sym->session, 0,
1501                         rte_cryptodev_sym_get_existing_header_session_size(
1502                                 op->sym->session));
1503                 rte_mempool_put(qp->sess_mp_priv, sess);
1504                 rte_mempool_put(qp->sess_mp, op->sym->session);
1505                 op->sym->session = NULL;
1506         }
1507
1508         return op;
1509 }
1510
1511 static inline void
1512 post_process_mb_sync_job(JOB_AES_HMAC *job)
1513 {
1514         uint32_t *st;
1515
1516         st = job->user_data;
1517         st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
1518 }
1519
1520 /**
1521  * Process a completed JOB_AES_HMAC job and keep processing jobs until
1522  * IMB_GET_COMPLETED_JOB returns NULL
1523  *
1524  * @param qp            Queue Pair to process
1525  * @param job           JOB_AES_HMAC job
1526  *
1527  * @return
1528  * - Number of processed jobs
1529  */
1530 static unsigned
1531 handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
1532                 struct rte_crypto_op **ops, uint16_t nb_ops)
1533 {
1534         struct rte_crypto_op *op = NULL;
1535         unsigned processed_jobs = 0;
1536
1537         while (job != NULL) {
1538                 op = post_process_mb_job(qp, job);
1539
1540                 if (op) {
1541                         ops[processed_jobs++] = op;
1542                         qp->stats.dequeued_count++;
1543                 } else {
1544                         qp->stats.dequeue_err_count++;
1545                         break;
1546                 }
1547                 if (processed_jobs == nb_ops)
1548                         break;
1549
1550                 job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
1551         }
1552
1553         return processed_jobs;
1554 }
1555
1556 static inline uint32_t
1557 handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
1558 {
1559         uint32_t i;
1560
1561         for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
1562                 post_process_mb_sync_job(job);
1563
1564         return i;
1565 }
1566
1567 static inline uint32_t
1568 flush_mb_sync_mgr(MB_MGR *mb_mgr)
1569 {
1570         JOB_AES_HMAC *job;
1571
1572         job = IMB_FLUSH_JOB(mb_mgr);
1573         return handle_completed_sync_jobs(job, mb_mgr);
1574 }
1575
1576 static inline uint16_t
1577 flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
1578                 uint16_t nb_ops)
1579 {
1580         int processed_ops = 0;
1581
1582         /* Flush the remaining jobs */
1583         JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
1584
1585         if (job)
1586                 processed_ops += handle_completed_jobs(qp, job,
1587                                 &ops[processed_ops], nb_ops - processed_ops);
1588
1589         return processed_ops;
1590 }
1591
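/*
 * Turn a job into a do-nothing (NULL cipher/hash) job. Used when building the
 * real job parameters fails: the op stays attached as user_data, so it still
 * flows through the MB manager and is returned with its error status set.
 */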
1592 static inline JOB_AES_HMAC *
1593 set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
1594 {
1595         job->chain_order = HASH_CIPHER;
1596         job->cipher_mode = NULL_CIPHER;
1597         job->hash_alg = NULL_HASH;
1598         job->cipher_direction = DECRYPT;
1599
1600         /* Set user data to be crypto operation data struct */
1601         job->user_data = op;
1602
1603         return job;
1604 }
1605
1606 static uint16_t
1607 aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
1608                 uint16_t nb_ops)
1609 {
1610         struct aesni_mb_qp *qp = queue_pair;
1611
1612         struct rte_crypto_op *op;
1613         JOB_AES_HMAC *job;
1614
1615         int retval, processed_jobs = 0;
1616
1617         if (unlikely(nb_ops == 0))
1618                 return 0;
1619
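        /*
         * Work on a local copy of the digest buffer index; it is written back
         * to the queue pair once the burst has been processed.
         */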
1620         uint8_t digest_idx = qp->digest_idx;
1621         do {
1622                 /* Get next free mb job struct from mb manager */
1623                 job = IMB_GET_NEXT_JOB(qp->mb_mgr);
1624                 if (unlikely(job == NULL)) {
1625                         /* if there are no free mb job structs, flush the mb_mgr */
1626                         processed_jobs += flush_mb_mgr(qp,
1627                                         &ops[processed_jobs],
1628                                         nb_ops - processed_jobs);
1629
1630                         if (nb_ops == processed_jobs)
1631                                 break;
1632
1633                         job = IMB_GET_NEXT_JOB(qp->mb_mgr);
1634                 }
1635
1636                 /*
1637                  * Get next operation to process from ingress queue.
1638                  * There is no need to return the job to the MB_MGR
1639                  * if there are no more operations to process, since the MB_MGR
1640                  * can use that pointer again in subsequent get_next calls.
1641                  */
1642                 retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
1643                 if (retval < 0)
1644                         break;
1645
1646 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1647                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1648                         retval = set_sec_mb_job_params(job, qp, op,
1649                                                 &digest_idx);
1650                 else
1651 #endif
1652                         retval = set_mb_job_params(job, qp, op, &digest_idx);
1653
1654                 if (unlikely(retval != 0)) {
1655                         qp->stats.dequeue_err_count++;
1656                         set_job_null_op(job, op);
1657                 }
1658
1659                 /* Submit job to multi-buffer for processing */
1660 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1661                 job = IMB_SUBMIT_JOB(qp->mb_mgr);
1662 #else
1663                 job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
1664 #endif
1665                 /*
1666                  * If submit returns a processed job, handle it
1667                  * before submitting subsequent jobs
1668                  */
1669                 if (job)
1670                         processed_jobs += handle_completed_jobs(qp, job,
1671                                         &ops[processed_jobs],
1672                                         nb_ops - processed_jobs);
1673
1674         } while (processed_jobs < nb_ops);
1675
1676         qp->digest_idx = digest_idx;
1677
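        /*
         * If nothing has completed yet (jobs may still be held inside the
         * manager), flush once so the burst can make forward progress.
         */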
1678         if (processed_jobs < 1)
1679                 processed_jobs += flush_mb_mgr(qp,
1680                                 &ops[processed_jobs],
1681                                 nb_ops - processed_jobs);
1682
1683         return processed_jobs;
1684 }
1685
1686 static MB_MGR *
1687 alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
1688 {
1689         MB_MGR *mb_mgr = alloc_mb_mgr(0);
1690         if (mb_mgr == NULL)
1691                 return NULL;
1692
1693         switch (vector_mode) {
1694         case RTE_AESNI_MB_SSE:
1695                 init_mb_mgr_sse(mb_mgr);
1696                 break;
1697         case RTE_AESNI_MB_AVX:
1698                 init_mb_mgr_avx(mb_mgr);
1699                 break;
1700         case RTE_AESNI_MB_AVX2:
1701                 init_mb_mgr_avx2(mb_mgr);
1702                 break;
1703         case RTE_AESNI_MB_AVX512:
1704                 init_mb_mgr_avx512(mb_mgr);
1705                 break;
1706         default:
1707                 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
1708                 free_mb_mgr(mb_mgr);
1709                 return NULL;
1710         }
1711
1712         return mb_mgr;
1713 }
1714
1715 static inline void
1716 aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
1717 {
1718         uint32_t i;
1719
1720         for (i = 0; i != vec->num; ++i)
1721                 vec->status[i] = err;
1722 }
1723
1724 static inline int
1725 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
1726 {
1727         /* no multi-seg support with current AESNI-MB PMD */
1728         if (sgl->num != 1)
1729                 return ENOTSUP;
1730         else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
1731                 return EINVAL;
1732         return 0;
1733 }
1734
1735 static inline JOB_AES_HMAC *
1736 submit_sync_job(MB_MGR *mb_mgr)
1737 {
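        /*
         * IMB_SUBMIT_JOB validates the job parameters, while the NOCHECK
         * variant skips that validation for performance in non-debug builds.
         */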
1738 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1739         return IMB_SUBMIT_JOB(mb_mgr);
1740 #else
1741         return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
1742 #endif
1743 }
1744
1745 static inline uint32_t
1746 generate_sync_dgst(struct rte_crypto_sym_vec *vec,
1747         const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
1748 {
1749         uint32_t i, k;
1750
1751         for (i = 0, k = 0; i != vec->num; i++) {
1752                 if (vec->status[i] == 0) {
1753                         memcpy(vec->digest[i], dgst[i], len);
1754                         k++;
1755                 }
1756         }
1757
1758         return k;
1759 }
1760
1761 static inline uint32_t
1762 verify_sync_dgst(struct rte_crypto_sym_vec *vec,
1763         const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
1764 {
1765         uint32_t i, k;
1766
1767         for (i = 0, k = 0; i != vec->num; i++) {
1768                 if (vec->status[i] == 0) {
1769                         if (memcmp(vec->digest[i], dgst[i], len) != 0)
1770                                 vec->status[i] = EBADMSG;
1771                         else
1772                                 k++;
1773                 }
1774         }
1775
1776         return k;
1777 }
1778
1779 uint32_t
1780 aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
1781         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
1782         struct rte_crypto_sym_vec *vec)
1783 {
1784         int32_t ret;
1785         uint32_t i, j, k, len;
1786         void *buf;
1787         JOB_AES_HMAC *job;
1788         MB_MGR *mb_mgr;
1789         struct aesni_mb_private *priv;
1790         struct aesni_mb_session *s;
1791         uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
1792
1793         s = get_sym_session_private_data(sess, dev->driver_id);
1794         if (s == NULL) {
1795                 aesni_mb_fill_error_code(vec, EINVAL);
1796                 return 0;
1797         }
1798
1799         /* get per-thread MB MGR, create one if needed */
1800         mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
1801         if (mb_mgr == NULL) {
1802
1803                 priv = dev->data->dev_private;
1804                 mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
1805                 if (mb_mgr == NULL) {
1806                         aesni_mb_fill_error_code(vec, ENOMEM);
1807                         return 0;
1808                 }
1809                 RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
1810         }
1811
1812         for (i = 0, j = 0, k = 0; i != vec->num; i++) {
1813
1814
1815                 ret = check_crypto_sgl(sofs, vec->sgl + i);
1816                 if (ret != 0) {
1817                         vec->status[i] = ret;
1818                         continue;
1819                 }
1820
1821                 buf = vec->sgl[i].vec[0].base;
1822                 len = vec->sgl[i].vec[0].len;
1823
1824                 job = IMB_GET_NEXT_JOB(mb_mgr);
1825                 if (job == NULL) {
1826                         k += flush_mb_sync_mgr(mb_mgr);
1827                         job = IMB_GET_NEXT_JOB(mb_mgr);
1828                         RTE_ASSERT(job != NULL);
1829                 }
1830
1831                 /* Submit job for processing */
1832                 set_cpu_mb_job_params(job, s, sofs, buf, len,
1833                         vec->iv[i], vec->aad[i], tmp_dgst[i],
1834                         &vec->status[i]);
1835                 job = submit_sync_job(mb_mgr);
1836                 j++;
1837
1838                 /* handle completed jobs */
1839                 k += handle_completed_sync_jobs(job, mb_mgr);
1840         }
1841
1842         /* flush remaining jobs */
1843         while (k != j)
1844                 k += flush_mb_sync_mgr(mb_mgr);
1845
1846         /* finish processing for successful jobs: check/update digest */
1847         if (k != 0) {
1848                 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1849                         k = verify_sync_dgst(vec,
1850                                 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
1851                                 s->auth.req_digest_len);
1852                 else
1853                         k = generate_sync_dgst(vec,
1854                                 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
1855                                 s->auth.req_digest_len);
1856         }
1857
1858         return k;
1859 }
1860
1861 static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
1862
1863 static uint64_t
1864 vec_mode_to_flags(enum aesni_mb_vector_mode mode)
1865 {
1866         switch (mode) {
1867         case RTE_AESNI_MB_SSE:
1868                 return RTE_CRYPTODEV_FF_CPU_SSE;
1869         case RTE_AESNI_MB_AVX:
1870                 return RTE_CRYPTODEV_FF_CPU_AVX;
1871         case RTE_AESNI_MB_AVX2:
1872                 return RTE_CRYPTODEV_FF_CPU_AVX2;
1873         case RTE_AESNI_MB_AVX512:
1874                 return RTE_CRYPTODEV_FF_CPU_AVX512;
1875         default:
1876                 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
1877                 return 0;
1878         }
1879 }
1880
1881 static int
1882 cryptodev_aesni_mb_create(const char *name,
1883                         struct rte_vdev_device *vdev,
1884                         struct rte_cryptodev_pmd_init_params *init_params)
1885 {
1886         struct rte_cryptodev *dev;
1887         struct aesni_mb_private *internals;
1888         enum aesni_mb_vector_mode vector_mode;
1889         MB_MGR *mb_mgr;
1890
1891         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
1892         if (dev == NULL) {
1893                 AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
1894                 return -ENODEV;
1895         }
1896
1897         /* Check CPU for supported vector instruction set */
1898         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
1899                 vector_mode = RTE_AESNI_MB_AVX512;
1900         else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
1901                 vector_mode = RTE_AESNI_MB_AVX2;
1902         else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
1903                 vector_mode = RTE_AESNI_MB_AVX;
1904         else
1905                 vector_mode = RTE_AESNI_MB_SSE;
1906
1907         dev->driver_id = cryptodev_driver_id;
1908         dev->dev_ops = rte_aesni_mb_pmd_ops;
1909
1910         /* register enqueue/dequeue burst functions for data path */
1911         dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
1912         dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
1913
1914         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1915                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
1916                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
1917                         RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
1918                         RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
1919
1920 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1921         struct rte_security_ctx *security_instance;
1922         security_instance = rte_malloc("aesni_mb_sec",
1923                                 sizeof(struct rte_security_ctx),
1924                                 RTE_CACHE_LINE_SIZE);
1925         if (security_instance == NULL) {
1926                 AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
1927                 rte_cryptodev_pmd_destroy(dev);
1928                 return -ENOMEM;
1929         }
1930
1931         security_instance->device = (void *)dev;
1932         security_instance->ops = rte_aesni_mb_pmd_sec_ops;
1933         security_instance->sess_cnt = 0;
1934         dev->security_ctx = security_instance;
1935         dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
1936 #endif
1937
1938         /* Check CPU for AES instruction set support */
1939         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
1940                 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
1941         else
1942                 AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");
1943
1944         dev->feature_flags |= vec_mode_to_flags(vector_mode);
1945
1946         mb_mgr = alloc_init_mb_mgr(vector_mode);
1947         if (mb_mgr == NULL) {
1948 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1949                 rte_free(dev->security_ctx);
1950                 dev->security_ctx = NULL;
1951 #endif
1952                 rte_cryptodev_pmd_destroy(dev);
1953                 return -ENOMEM;
1954         }
1955
1956         /* Set the supported vector instruction mode */
1957         internals = dev->data->dev_private;
1958
1959         internals->vector_mode = vector_mode;
1960         internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
1961         internals->mb_mgr = mb_mgr;
1962
1963         AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
1964                         imb_get_version_str());
1965         return 0;
1966 }
1967
1968 static int
1969 cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
1970 {
1971         struct rte_cryptodev_pmd_init_params init_params = {
1972                 "",
1973                 sizeof(struct aesni_mb_private),
1974                 rte_socket_id(),
1975                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
1976         };
1977         const char *name, *args;
1978         int retval;
1979
1980         name = rte_vdev_device_name(vdev);
1981         if (name == NULL)
1982                 return -EINVAL;
1983
1984         args = rte_vdev_device_args(vdev);
1985
1986         retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
1987         if (retval) {
1988                 AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments [%s]",
1989                                 args);
1990                 return -EINVAL;
1991         }
1992
1993         return cryptodev_aesni_mb_create(name, vdev, &init_params);
1994 }
1995
1996 static int
1997 cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
1998 {
1999         struct rte_cryptodev *cryptodev;
2000         struct aesni_mb_private *internals;
2001         const char *name;
2002
2003         name = rte_vdev_device_name(vdev);
2004         if (name == NULL)
2005                 return -EINVAL;
2006
2007         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2008         if (cryptodev == NULL)
2009                 return -ENODEV;
2010
2011         internals = cryptodev->data->dev_private;
2012
2013         free_mb_mgr(internals->mb_mgr);
2014         if (RTE_PER_LCORE(sync_mb_mgr)) {
2015                 free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
2016                 RTE_PER_LCORE(sync_mb_mgr) = NULL;
2017         }
2018
2019 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2020         rte_free(cryptodev->security_ctx);
2021         cryptodev->security_ctx = NULL;
2022 #endif
2023
2024         return rte_cryptodev_pmd_destroy(cryptodev);
2025 }
2026
2027 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
2028         .probe = cryptodev_aesni_mb_probe,
2029         .remove = cryptodev_aesni_mb_remove
2030 };
2031
2032 static struct cryptodev_driver aesni_mb_crypto_drv;
2033
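/*
 * The macros below register this PMD as a virtual device and expose its
 * parameters. As an illustrative (untested) example, the device would
 * typically be instantiated from EAL with something like:
 *
 *   --vdev "crypto_aesni_mb,max_nb_queue_pairs=8,socket_id=0"
 *
 * where the parameter names follow RTE_PMD_REGISTER_PARAM_STRING below and
 * the device name is assumed to match CRYPTODEV_NAME_AESNI_MB_PMD.
 */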
2034 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
2035 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
2036 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
2037         "max_nb_queue_pairs=<int> "
2038         "socket_id=<int>");
2039 RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
2040                 cryptodev_aesni_mb_pmd_drv.driver,
2041                 cryptodev_driver_id);
2042 RTE_LOG_REGISTER(aesni_mb_logtype_driver, pmd.crypto.aesni_mb, NOTICE);