/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <intel-ipsec-mb.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_per_lcore.h>
#include <rte_ether.h>

#include "aesni_mb_pmd_private.h"

#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#define HMAC_MAX_BLOCK_SIZE 128

static uint8_t cryptodev_driver_id;

/*
 * Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
 * as we still use JOB based API even for synchronous processing.
 */
static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
		void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		const uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned int i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}
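
/*
 * Note: this follows the HMAC construction from RFC 2104. Hashing the
 * key-XOR-ipad and key-XOR-opad blocks once at session setup lets the
 * multi-buffer library resume every per-packet HMAC from these saved
 * partial states instead of rehashing the key for each job.
 */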

/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}

#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/*
			 * CCM requires to hash first and cipher later
			 * when encrypting
			 */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
			else
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
		} else {
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}
#else
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}
#endif

	return AESNI_MB_OP_NOT_SUPPORTED;
}
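
/*
 * Note on the AEAD mapping above: CCM computes its CBC-MAC over the
 * plaintext while GCM's GHASH runs over the ciphertext, which is why the
 * two algorithms take opposite HASH/CIPHER orders for the same direction.
 */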

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn = NULL;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	uint32_t auth_precompute = 1;

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
		return -1;
	}

	/* Set IV parameters */
	sess->auth_iv.offset = xform->auth.iv.offset;
	sess->auth_iv.length = xform->auth.iv.length;

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		uint32_t dust[4*15];

		sess->auth.algo = AES_CMAC;

		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes,
		 * in version 0.49.
		 * If size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
		else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
		return 0;
	}
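
	/*
	 * Note: IMB_AES_CMAC_SUBKEY_GEN_128 above derives the K1/K2 subkeys
	 * defined by NIST SP 800-38B, so per-packet CMAC jobs only carry
	 * pointers to precomputed key material.
	 */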

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->cipher.direction = ENCRYPT;
			sess->chain_order = CIPHER_HASH;
		} else
			sess->cipher.direction = DECRYPT;

		sess->auth.algo = AES_GMAC;
		if (sess->auth.req_digest_len >
				get_digest_byte_length(AES_GMAC)) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;
		sess->iv.length = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		switch (xform->auth.key.length) {
		case AES_128_BYTES:
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			break;
		case AES_192_BYTES:
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			break;
		case AES_256_BYTES:
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid authentication key length\n");
			return -EINVAL;
		}

		return 0;
	}

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
		uint16_t zuc_eia3_digest_len =
			get_truncated_digest_byte_length(
						IMB_AUTH_ZUC_EIA3_BITLEN);
		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
		uint16_t snow3g_uia2_digest_len =
			get_truncated_digest_byte_length(
						IMB_AUTH_SNOW3G_UIA2_BITLEN);
		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_snow3g_auth);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
		uint16_t kasumi_f9_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_kasumi_auth);
		return 0;
	}
#endif

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
			IMB_SHA1(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->auth.algo = PLAIN_SHA1;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
			IMB_SHA224(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->auth.algo = PLAIN_SHA_224;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
			IMB_SHA256(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->auth.algo = PLAIN_SHA_256;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
			IMB_SHA384(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->auth.algo = PLAIN_SHA_384;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
			IMB_SHA512(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->auth.algo = PLAIN_SHA_512;
		auth_precompute = 0;
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}
	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}

	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
	else
		sess->auth.gen_digest_len = sess->auth.req_digest_len;
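
	/*
	 * If the requested length is neither the truncated nor the full
	 * digest size, the full digest is generated and later trimmed to
	 * req_digest_len in generate_digest(), as the multi-buffer library
	 * itself only emits the two standard lengths.
	 */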

	/* Plain SHA does not require precompute key */
	if (auth_precompute == 0)
		return 0;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			hashed_key,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	}

	return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;
	uint8_t is_docsis = 0;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	uint8_t is_zuc = 0;
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;
#endif

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		is_docsis = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = DES3;
		is_3DES = 1;
		break;
#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.mode = ECB;
		is_aes = 1;
		break;
#endif
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
		is_zuc = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
		is_snow3g = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		is_kasumi = 1;
		break;
#endif
	default:
		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_docsis) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
#endif
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case 24:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->cipher.key_length_in_bytes = 24;
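		/*
		 * The key length is always reported as 24 bytes here since
		 * the multi-buffer library consumes 3DES keying material as
		 * three key-schedule pointers; the 16- and 8-byte cases above
		 * are expressed by repeating K1 (two-key and one-key 3DES).
		 */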
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	} else if (is_zuc) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			16);
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
#endif
	} else {
		if (xform->cipher.key.length != 8) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
			xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
			xform->cipher.key.data);
	}

	return 0;
}

static int
aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	/* Set digest sizes */
	sess->auth.req_digest_len = xform->aead.digest_length;
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = CCM;
		sess->auth.algo = AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* CCM digests must be between 4 and 16 and an even number */
		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
				sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
				(sess->auth.req_digest_len & 1) == 1) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = GCM;
		sess->auth.algo = AES_GMAC;

		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* GCM digest size must be between 1 and 16 */
		if (sess->auth.req_digest_len == 0 ||
				sess->auth.req_digest_len > 16) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	default:
		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	return 0;
}

/** Parse crypto xform chain and set private session parameters */
static int
aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * Multi buffer library operates in only two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * chain order depends on cipher operation: encryption is always
		 * the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	sess->auth_iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
			cipher_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return ret;
	}

	if (aead_xform) {
		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
				aead_xform);
		if (ret != 0) {
			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
			return ret;
		}
	}

	return 0;
}
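
/*
 * Illustrative sketch (not part of the driver): an application reaches the
 * parser above by chaining xforms before session creation, e.g. AES-CBC
 * encryption followed by SHA1-HMAC generation (key/IV fields omitted):
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xform,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		},
 *	};
 *
 * aesni_mb_get_chain_order() classifies this chain as
 * AESNI_MB_OP_CIPHER_HASH.
 */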

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/** Check DOCSIS security session configuration is valid */
static int
check_docsis_sec_session(struct rte_security_session_conf *conf)
{
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* Downlink: CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	/* Uplink: Cipher decrypt -> CRC verify */
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	}

	return -EINVAL;
}

/** Set DOCSIS security session auth (CRC) parameters */
static int
aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
		struct rte_security_docsis_xform *xform)
{
	if (xform == NULL) {
		AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");
		return -EINVAL;
	}

	/* Select CRC generate/verify */
	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
	} else {
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");
		return -ENOTSUP;
	}

	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;

	return 0;
}

/**
 * Parse DOCSIS security session configuration and set private session
 * parameters
 */
static int
aesni_mb_set_docsis_sec_session_parameters(
		__rte_unused struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf,
		void *sess)
{
	struct rte_security_docsis_xform *docsis_xform;
	struct rte_crypto_sym_xform *cipher_xform;
	struct aesni_mb_session *aesni_sess = sess;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	ret = check_docsis_sec_session(conf);
	if (ret) {
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		return ret;
	}

	switch (conf->docsis.direction) {
	case RTE_SECURITY_DOCSIS_UPLINK:
		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
		docsis_xform = &conf->docsis;
		cipher_xform = conf->crypto_xform;
		break;
	case RTE_SECURITY_DOCSIS_DOWNLINK:
		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
		cipher_xform = conf->crypto_xform;
		docsis_xform = &conf->docsis;
		break;
	default:
		return -EINVAL;
	}

	/* Default IV length = 0 */
	aesni_sess->iv.length = 0;

	ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
			docsis_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
		return -EINVAL;
	}

	ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
			aesni_sess, cipher_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return -EINVAL;
	}

	return 0;
}
#endif

/**
 * Burst enqueue, place crypto operations on ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_sym_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		if (likely(op->sym->sec_session != NULL))
			sess = (struct aesni_mb_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
#endif
	} else {
		void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
		void *_sess_private_data = NULL;

		if (_sess == NULL)
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}

static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
		uint32_t oop)
{
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->chain_order != CIPHER_HASH)
		return op->sym->auth.data.offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;

	/*
	 * Copy the content between cipher offset and auth offset for
	 * generating the correct digest
	 */
	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
		memcpy(p_dst + op->sym->auth.data.offset,
				p_src + op->sym->auth.data.offset,
				op->sym->cipher.data.offset -
				op->sym->auth.data.offset);

	/*
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating the correct digest
	 */
	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/*
	 * Since intel-ipsec-mb only supports positive values,
	 * we need to derive the correct offset between src and dst.
	 */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}
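
/*
 * Note: the expression above is simply (u_dst - u_src) modulo 2^64, i.e.
 * the two's-complement encoding of a negative offset, which the library
 * then applies with wrap-around arithmetic.
 */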

static void
set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		void *iv, void *aad, void *digest, void *udata)
{
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
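		/*
		 * Per the cryptodev CCM convention, the AAD buffer includes
		 * 18 bytes of leading padding in which the multi-buffer
		 * library constructs the B0 block and encoded AAD length,
		 * hence the +18 offset below.
		 */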
		job->u.CCM.aad = (uint8_t *)aad + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = aad;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = buf;
			job->u.GCM.aad_len_in_bytes = len;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;

	default:
		job->u.HMAC._hashed_auth_key_xor_ipad =
				session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
				session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest location and length */
	job->auth_tag_output = digest;
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv = iv;
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = buf;
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
	if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
	} else {
		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
			sofs.ofs.auth.tail;
		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
			sofs.ofs.cipher.tail;
	}

	job->user_data = udata;
}

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param job		JOB_AES_HMAC structure to fill
 * @param qp		queue pair to process
 * @param op		crypto operation to process
 * @param digest_idx	index into the queue pair's temporary digest array
 *
 * @return
 * - 0 on success, with the job ready for submission
 * - -1 if completion of the JOB_AES_HMAC structure isn't possible
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_ZUC_EIA3_BITLEN:
		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._key = (void *) &session->auth.pKeySched_snow3g_auth;
		job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_KASUMI_UIA1:
		job->u.KASUMI_UIA1._key = (void *) &session->auth.pKeySched_kasumi_auth;
		break;
#endif
	default:
		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
			session->cipher.mode == GCM))
		m_offset = op->sym->aead.data.offset;
	else
		m_offset = op->sym->cipher.data.offset;

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
		job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
		job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
		m_offset = 0;
	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
		m_offset = 0;
	}
#endif

	if (!op->sym->m_dst) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else {
		/* out-of-place operation */
		m_dst = op->sym->m_dst;
		oop = 1;
	}

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
	} else {
		if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
				session->cipher.mode == GCM))
			job->auth_tag_output = op->sym->aead.digest.data;
		else
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len !=
				session->auth.gen_digest_len) {
			job->auth_tag_output = qp->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
		}
	}
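
	/*
	 * Note: verify operations (and mismatched digest lengths) write to a
	 * per-qp scratch digest above, so the computed tag cannot clobber
	 * the application's digest buffer before it is compared or trimmed
	 * in post-processing.
	 */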

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
	case AES_CCM:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
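
		/*
		 * Per the cryptodev CCM convention, the first IV byte is
		 * reserved for the CCM flags, so the nonce handed to the
		 * library starts at offset + 1.
		 */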
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes =
					op->sym->aead.data.length;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		} else {
			job->cipher_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->msg_len_to_cipher_in_bytes = 0;
			job->msg_len_to_hash_in_bytes = 0;
		}

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

	default:
		/* For SNOW3G, length and offsets are already in bits */
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop);
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
		job->msg_len_to_cipher_in_bytes >>= 3;
	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
		job->msg_len_to_hash_in_bytes >>= 3;
#endif
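
	/*
	 * Note: the >>= 3 conversions above are needed because the cryptodev
	 * API expresses ZUC cipher lengths and KASUMI hash lengths in bits,
	 * while the multi-buffer job takes byte counts in these fields.
	 */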

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/**
 * Process a crypto operation containing a security op and complete a
 * JOB_AES_HMAC job structure for submission to the multi buffer library for
 * processing.
 */
static inline int
set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session;

	session = get_session(qp, op);
	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Only DOCSIS protocol operations supported now */
	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}

	sym = op->sym;
	m_src = sym->m_src;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		m_dst = m_src;
	} else {
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	/* Set digest output location */
	job->auth_tag_output = qp->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % MAX_JOBS;

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
						sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

	return 0;
}

static inline void
verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
{
	uint16_t crc_offset;
	uint8_t *crc;

	if (!job->msg_len_to_hash_in_bytes)
		return;

	crc_offset = job->hash_start_src_offset_in_bytes +
			job->msg_len_to_hash_in_bytes -
			job->cipher_start_src_offset_in_bytes;
	crc = job->dst + crc_offset;

	/* Verify CRC (at the end of the message) */
	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
#endif

static inline void
verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
{
	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, digest, len) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}

static inline void
generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
		struct aesni_mb_session *sess)
{
	/* No extra copy needed */
	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
		return;

	/*
	 * This can only happen for HMAC, so only the digest
	 * for authentication algos is required
	 */
	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
			sess->auth.req_digest_len);
}

/**
 * Process a completed job and return the crypto operation it processed
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns processed crypto operation.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = NULL;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	uint8_t is_docsis_sec = 0;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assuming at this point that if it's a security type op, that
		 * this is for DOCSIS
		 */
		is_docsis_sec = 1;
		sess = get_sec_session_private_data(op->sym->sec_session);
	} else
#endif
	{
		sess = get_sym_session_private_data(op->sym->session,
						cryptodev_driver_id);
	}

	if (unlikely(sess == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return op;
	}

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg == NULL_HASH)
				break;

			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
				if (job->hash_alg == AES_CCM ||
					(job->hash_alg == AES_GMAC &&
						sess->cipher.mode == GCM))
					verify_digest(job,
						op->sym->aead.digest.data,
						sess->auth.req_digest_len,
						&op->status);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
				else if (is_docsis_sec)
					verify_docsis_sec_crc(job,
						&op->status);
#endif
				else
					verify_digest(job,
						op->sym->auth.digest.data,
						sess->auth.req_digest_len,
						&op->status);
			} else
				generate_digest(job, op, sess);
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
				op->sym->session));
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}

static inline void
post_process_mb_sync_job(JOB_AES_HMAC *job)
{
	uint32_t *st;

	st = job->user_data;
	st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned int processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}
		if (processed_jobs == nb_ops)
			break;

		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
	}

	return processed_jobs;
}

static inline uint32_t
handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
{
	uint32_t i;

	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
		post_process_mb_sync_job(job);

	return i;
}

static inline uint32_t
flush_mb_sync_mgr(MB_MGR *mb_mgr)
{
	JOB_AES_HMAC *job;

	job = IMB_FLUSH_JOB(mb_mgr);
	return handle_completed_sync_jobs(job, mb_mgr);
}

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}

static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return job;
}

static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	uint8_t digest_idx = qp->digest_idx;
	do {
		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		}

		/*
		 * Get next operation to process from ingress queue.
		 * There is no need to return the job to the MB_MGR
		 * if there are no more operations to process, since the MB_MGR
		 * can use that pointer again in next get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			retval = set_sec_mb_job_params(job, qp, op,
						&digest_idx);
		else
#endif
			retval = set_mb_job_params(job, qp, op, &digest_idx);

		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(qp->mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
#endif
		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;
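
	/*
	 * Guarantee forward progress: the multi-buffer manager may keep all
	 * submitted jobs buffered internally, so if nothing completed in the
	 * loop above, force at least one job out with a flush.
	 */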
	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}

static MB_MGR *
alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
{
	MB_MGR *mb_mgr = alloc_mb_mgr(0);

	if (mb_mgr == NULL)
		return NULL;

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		init_mb_mgr_sse(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX:
		init_mb_mgr_avx(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX2:
		init_mb_mgr_avx2(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX512:
		init_mb_mgr_avx512(mb_mgr);
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		free_mb_mgr(mb_mgr);
		return NULL;
	}

	return mb_mgr;
}

static inline void
aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
{
	uint32_t i;

	for (i = 0; i != vec->num; ++i)
		vec->status[i] = err;
}

static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
	/* no multi-seg support with current AESNI-MB PMD */
	if (sgl->num != 1)
		return ENOTSUP;
	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
		return EINVAL;
	return 0;
}

static inline JOB_AES_HMAC *
submit_sync_job(MB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
	return IMB_SUBMIT_JOB(mb_mgr);
#else
	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
}

static inline uint32_t
generate_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
	uint32_t i, k;

	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			memcpy(vec->digest[i], dgst[i], len);
			k++;
		}
	}

	return k;
}

static inline uint32_t
verify_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
	uint32_t i, k;

	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			if (memcmp(vec->digest[i], dgst[i], len) != 0)
				vec->status[i] = EBADMSG;
			else
				k++;
		}
	}

	return k;
}

static uint32_t
aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
	struct rte_crypto_sym_vec *vec)
{
	int32_t ret;
	uint32_t i, j, k, len;
	void *buf;
	JOB_AES_HMAC *job;
	MB_MGR *mb_mgr;
	struct aesni_mb_private *priv;
	struct aesni_mb_session *s;
	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];

	s = get_sym_session_private_data(sess, dev->driver_id);
	if (s == NULL) {
		aesni_mb_fill_error_code(vec, EINVAL);
		return 0;
	}

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
	if (mb_mgr == NULL) {
		priv = dev->data->dev_private;
		mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
		if (mb_mgr == NULL) {
			aesni_mb_fill_error_code(vec, ENOMEM);
			return 0;
		}
		RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
	}

	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
		ret = check_crypto_sgl(sofs, vec->sgl + i);
		if (ret != 0) {
			vec->status[i] = ret;
			continue;
		}

		buf = vec->sgl[i].vec[0].base;
		len = vec->sgl[i].vec[0].len;

		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (job == NULL) {
			k += flush_mb_sync_mgr(mb_mgr);
			job = IMB_GET_NEXT_JOB(mb_mgr);
			RTE_ASSERT(job != NULL);
		}

		/* Submit job for processing */
		set_cpu_mb_job_params(job, s, sofs, buf, len,
			vec->iv[i], vec->aad[i], tmp_dgst[i],
			&vec->status[i]);
		job = submit_sync_job(mb_mgr);
		j++;

		/* handle completed jobs */
		k += handle_completed_sync_jobs(job, mb_mgr);
	}

	/* flush remaining jobs */
	while (k != j)
		k += flush_mb_sync_mgr(mb_mgr);

	/* finish processing for successful jobs: check/update digest */
	if (k != 0) {
		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
			k = verify_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
		else
			k = generate_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
	}

	return k;
}
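
/*
 * Note: the function above backs the synchronous
 * rte_cryptodev_sym_cpu_crypto_process() API. Each buffer in the vector is
 * still submitted as a regular multi-buffer job, and the call returns only
 * after every job has been flushed, so no queue pair or enqueue/dequeue
 * round trip is involved.
 */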

static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static uint64_t
vec_mode_to_flags(enum aesni_mb_vector_mode mode)
{
	switch (mode) {
	case RTE_AESNI_MB_SSE:
		return RTE_CRYPTODEV_FF_CPU_SSE;
	case RTE_AESNI_MB_AVX:
		return RTE_CRYPTODEV_FF_CPU_AVX;
	case RTE_AESNI_MB_AVX2:
		return RTE_CRYPTODEV_FF_CPU_AVX2;
	case RTE_AESNI_MB_AVX512:
		return RTE_CRYPTODEV_FF_CPU_AVX512;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
		return 0;
	}
}

static int
cryptodev_aesni_mb_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;
	MB_MGR *mb_mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	struct rte_security_ctx *security_instance;
	security_instance = rte_malloc("aesni_mb_sec",
				sizeof(struct rte_security_ctx),
				RTE_CACHE_LINE_SIZE);
	if (security_instance == NULL) {
		AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
		rte_cryptodev_pmd_destroy(dev);
		return -ENOMEM;
	}

	security_instance->device = (void *)dev;
	security_instance->ops = rte_aesni_mb_pmd_sec_ops;
	security_instance->sess_cnt = 0;
	dev->security_ctx = security_instance;
	dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
#endif

	/* Check CPU for support for AES instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
	else
		AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");

	dev->feature_flags |= vec_mode_to_flags(vector_mode);

	mb_mgr = alloc_init_mb_mgr(vector_mode);
	if (mb_mgr == NULL) {
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		rte_free(dev->security_ctx);
		dev->security_ctx = NULL;
#endif
		rte_cryptodev_pmd_destroy(dev);
		return -ENOMEM;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mb_mgr = mb_mgr;

	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());

	return 0;
}

static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_mb_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name, *args;
	int retval;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
	if (retval) {
		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
				args);
		return -EINVAL;
	}

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}

static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	struct aesni_mb_private *internals;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);
	if (RTE_PER_LCORE(sync_mb_mgr)) {
		free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
		RTE_PER_LCORE(sync_mb_mgr) = NULL;
	}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
#endif

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(aesni_mb_logtype_driver, pmd.crypto.aesni_mb, NOTICE);