/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <intel-ipsec-mb.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_per_lcore.h>
#include <rte_ether.h>

#include "aesni_mb_pmd_private.h"

#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#define HMAC_MAX_BLOCK_SIZE 128

static uint8_t cryptodev_driver_id;

/*
 * Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
 * as we still use JOB based API even for synchronous processing.
 */
static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
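
/*
 * The per-lcore manager above is allocated lazily, on the first call to
 * aesni_mb_cpu_crypto_process_bulk() on that lcore, and freed when the
 * device is removed in cryptodev_aesni_mb_remove().
 */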

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		const uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}
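
/*
 * Illustrative sketch only (not part of the driver flow): for an HMAC-SHA1
 * session keyed with a key no longer than the block size, the session setup
 * below boils down to a call of the form
 *
 *	calculate_auth_precomputes(mb_mgr->sha1_one_block,
 *			sess->auth.pads.inner, sess->auth.pads.outer,
 *			xform->auth.key.data, xform->auth.key.length,
 *			get_auth_algo_blocksize(SHA1));
 *
 * leaving the hashed key-XOR-ipad/opad blocks where the job setup code can
 * point the IMB library at them.
 */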

/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}

#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/*
			 * CCM requires hashing first and ciphering later
			 * when encrypting
			 */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
			else
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
		} else {
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}
#else
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}
#endif

	return AESNI_MB_OP_NOT_SUPPORTED;
}
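
/*
 * Illustrative only: a two-element chain such as
 *	cipher(AES-CBC, ENCRYPT) -> auth(SHA1-HMAC)
 * (with xform->next pointing at the auth xform) maps to
 * AESNI_MB_OP_CIPHER_HASH above, while the reverse chain maps to
 * AESNI_MB_OP_HASH_CIPHER.
 */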

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn = NULL;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	uint32_t auth_precompute = 1;

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
		return -1;
	}

	/* Set IV parameters */
	sess->auth_iv.offset = xform->auth.iv.offset;
	sess->auth_iv.length = xform->auth.iv.length;

	/* Set the requested digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		uint32_t dust[4*15];

		sess->auth.algo = AES_CMAC;

		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes,
		 * in earlier versions.
		 * If the size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
		else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
		return 0;
	}
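
	/*
	 * Note: IMB_AES_CMAC_SUBKEY_GEN_128() above derives the standard CMAC
	 * subkeys K1/K2 (RFC 4493) from the expanded encrypt key; "dust" only
	 * receives the decrypt schedule, which CMAC never uses.
	 */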

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->cipher.direction = ENCRYPT;
			sess->chain_order = CIPHER_HASH;
		} else
			sess->cipher.direction = DECRYPT;

		sess->auth.algo = AES_GMAC;
		/*
		 * Multi-buffer lib supports 8, 12 and 16 bytes of digest.
		 * If the size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len != 16 &&
				sess->auth.req_digest_len != 12 &&
				sess->auth.req_digest_len != 8) {
			sess->auth.gen_digest_len = 16;
		} else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		sess->iv.length = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		switch (xform->auth.key.length) {
		case AES_128_BYTES:
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			break;
		case AES_192_BYTES:
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			break;
		case AES_256_BYTES:
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid authentication key length\n");
			return -EINVAL;
		}

		return 0;
	}

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
		uint16_t zuc_eia3_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_ZUC_EIA3_BITLEN);
		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
		uint16_t snow3g_uia2_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_SNOW3G_UIA2_BITLEN);
		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_snow3g_auth);
		return 0;
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
		uint16_t kasumi_f9_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_kasumi_auth);
		return 0;
	}
#endif

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
			IMB_SHA1(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->auth.algo = PLAIN_SHA1;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
			IMB_SHA224(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->auth.algo = PLAIN_SHA_224;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
			IMB_SHA256(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->auth.algo = PLAIN_SHA_256;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
			IMB_SHA384(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->auth.algo = PLAIN_SHA_384;
		auth_precompute = 0;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
			IMB_SHA512(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->auth.algo = PLAIN_SHA_512;
		auth_precompute = 0;
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}

	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}

	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
	else
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

	/* Plain SHA does not require a precomputed key */
	if (auth_precompute == 0)
		return 0;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			hashed_key,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	}

	return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;
	uint8_t is_docsis = 0;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	uint8_t is_zuc = 0;
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;
#endif

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		is_docsis = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = DES3;
		is_3DES = 1;
		break;
#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.mode = ECB;
		is_aes = 1;
		break;
#endif
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
		is_zuc = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
		is_snow3g = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		is_kasumi = 1;
		break;
#endif
	default:
		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_docsis) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
#endif
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case 24:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);

			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->cipher.key_length_in_bytes = 24;
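		/*
		 * Note: the multi-buffer library always schedules three DES
		 * keys for 3DES, so key_length_in_bytes stays 24 here even
		 * when a 16- or 8-byte key was supplied and duplicated into
		 * the remaining slots above.
		 */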
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	} else if (is_zuc) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			16);
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
#endif
	} else {
		if (xform->cipher.key.length != 8) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
			xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
			xform->cipher.key.data);
	}

	return 0;
}

static int
aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = CCM;
		sess->auth.algo = AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = GCM;
		sess->auth.algo = AES_GMAC;

		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		break;

	default:
		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	sess->auth.req_digest_len = xform->aead.digest_length;
	/* CCM digests must be between 4 and 16 bytes and an even number */
	if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
			(sess->auth.req_digest_len & 1) == 1) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	return 0;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * Multi buffer library operates in only two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * chain order depends on cipher operation: encryption is always
		 * the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	sess->auth_iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
			cipher_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return ret;
	}

	if (aead_xform) {
		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
				aead_xform);
		if (ret != 0) {
			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
			return ret;
		}
	}

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/** Check that the DOCSIS security session configuration is valid */
static int
check_docsis_sec_session(struct rte_security_session_conf *conf)
{
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* Downlink: CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	/* Uplink: Cipher decrypt -> CRC verify */
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
			return 0;
		}
	}

	return -EINVAL;
}

/** Set DOCSIS security session auth (CRC) parameters */
static int
aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
		struct rte_security_docsis_xform *xform)
{
	if (xform == NULL) {
		AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");
		return -EINVAL;
	}

	/* Select CRC generate/verify */
	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
	} else {
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");
		return -ENOTSUP;
	}

	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;

	return 0;
}

/**
 * Parse DOCSIS security session configuration and set private session
 * parameters
 */
int
aesni_mb_set_docsis_sec_session_parameters(
		struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf,
		void *sess)
{
	struct rte_security_docsis_xform *docsis_xform;
	struct rte_crypto_sym_xform *cipher_xform;
	struct aesni_mb_session *aesni_sess = sess;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	ret = check_docsis_sec_session(conf);
	if (ret) {
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		return ret;
	}

	switch (conf->docsis.direction) {
	case RTE_SECURITY_DOCSIS_UPLINK:
		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
		docsis_xform = &conf->docsis;
		cipher_xform = conf->crypto_xform;
		break;
	case RTE_SECURITY_DOCSIS_DOWNLINK:
		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
		cipher_xform = conf->crypto_xform;
		docsis_xform = &conf->docsis;
		break;
	default:
		return -EINVAL;
	}

	/* Default IV length = 0 */
	aesni_sess->iv.length = 0;

	ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
			docsis_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
		return -EINVAL;
	}

	ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
			aesni_sess, cipher_xform);

	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return -EINVAL;
	}

	return 0;
}
#endif

/**
 * Burst enqueue: place crypto operations on the ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_sym_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		if (likely(op->sym->sec_session != NULL))
			sess = (struct aesni_mb_session *)
				get_sec_session_private_data(
					op->sym->sec_session);
#endif
	} else {
		void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
		void *_sess_private_data = NULL;

		if (_sess == NULL)
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}

static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
		uint32_t oop)
{
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->chain_order != CIPHER_HASH)
		return op->sym->auth.data.offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;

	/*
	 * Copy the content between cipher offset and auth offset for
	 * generating the correct digest.
	 */
	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
		memcpy(p_dst + op->sym->auth.data.offset,
				p_src + op->sym->auth.data.offset,
				op->sym->cipher.data.offset -
				op->sym->auth.data.offset);

	/*
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating the correct digest.
	 */
	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/*
	 * Since intel-ipsec-mb only supports positive values,
	 * we need to derive the correct offset between src and dst.
	 */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}
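
/*
 * Worked example for the wrap-around return above (hypothetical addresses):
 * with u_src = 0x1010 and u_dst = 0x1000, the function returns
 * UINT64_MAX - 0x1010 + 0x1000 + 1 = 2^64 - 0x10, i.e. the two's complement
 * encoding of -16, which the IMB library adds to the source address modulo
 * 2^64 to reach the destination.
 */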

static inline void
set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		void *iv, void *aad, void *digest, void *udata)
{
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;
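
	/*
	 * Note on the "+ 18" below: per the rte_crypto CCM convention, the
	 * first 18 bytes of the AAD buffer are reserved for the CCM B0 block
	 * and the encoded AAD length, so the AAD proper starts at that
	 * offset.
	 */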
	case AES_CCM:
		job->u.CCM.aad = (uint8_t *)aad + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = aad;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = buf;
			job->u.GCM.aad_len_in_bytes = len;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;

	default:
		job->u.HMAC._hashed_auth_key_xor_ipad =
				session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
				session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest location and length */
	job->auth_tag_output = digest;
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = iv;

	/* Data Parameters */
	job->src = buf;
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
	if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
	} else {
		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
			sofs.ofs.auth.tail;
		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
			sofs.ofs.cipher.tail;
	}

	job->user_data = udata;
}

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	qp		queue pair
 * @param	job		JOB_AES_HMAC structure to fill
 * @param	op		crypto operation to process
 * @param	digest_idx	index into the queue pair's temporary digests
 *
 * @return
 * - 0 on success, the job is filled and ready for submission
 * - -1 if the operation carries an invalid session
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* For GMAC */
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_ZUC_EIA3_BITLEN:
		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._key = (void *) &session->auth.pKeySched_snow3g_auth;
		job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_KASUMI_UIA1:
		job->u.KASUMI_UIA1._key = (void *) &session->auth.pKeySched_kasumi_auth;
		break;
#endif
	default:
		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
			session->cipher.mode == GCM))
		m_offset = op->sym->aead.data.offset;
	else
		m_offset = op->sym->cipher.data.offset;

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
		job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
		job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
		m_offset = 0;
	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
		m_offset = 0;
	}
#endif

	if (!op->sym->m_dst) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else {
		/* out-of-place operation */
		m_dst = op->sym->m_dst;
		oop = 1;
	}

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
	} else {
		if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
				session->cipher.mode == GCM))
			job->auth_tag_output = op->sym->aead.digest.data;
		else
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
			job->auth_tag_output = qp->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
		}
	}

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
	case AES_CCM:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
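
		/*
		 * Per the rte_crypto CCM convention, the IV field carries the
		 * nonce starting one byte into the buffer (byte 0 is left for
		 * the implementation to write the CCM flags), hence the "+ 1"
		 * when locating it below.
		 */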
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case AES_GMAC:
		if (session->cipher.mode == GCM) {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes =
					op->sym->aead.data.length;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		} else {
			job->cipher_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->msg_len_to_cipher_in_bytes = 0;
			job->msg_len_to_hash_in_bytes = 0;
		}

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

	default:
		/* For SNOW3G, lengths and offsets are already in bits */
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop);
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
		job->msg_len_to_cipher_in_bytes >>= 3;
	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
		job->msg_len_to_hash_in_bytes >>= 3;
#endif

	/* Set user data to be the crypto operation data struct */
	job->user_data = op;

	return 0;
}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/**
 * Process a crypto operation containing a security op and complete a
 * JOB_AES_HMAC job structure for submission to the multi buffer library for
 * processing.
 */
static inline int
set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session;

	session = get_session(qp, op);
	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Only DOCSIS protocol operations supported now */
	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}

	sym = op->sym;
	m_src = sym->m_src;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		m_dst = m_src;
	} else {
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	/* Set digest output location */
	job->auth_tag_output = qp->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % MAX_JOBS;

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
						sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

	return 0;
}

static inline void
verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
{
	uint16_t crc_offset;
	uint8_t *crc;

	if (!job->msg_len_to_hash_in_bytes)
		return;

	crc_offset = job->hash_start_src_offset_in_bytes +
			job->msg_len_to_hash_in_bytes -
			job->cipher_start_src_offset_in_bytes;
	crc = job->dst + crc_offset;
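
	/*
	 * job->dst points at the first ciphered byte (the mbuf data pointer
	 * was advanced by cipher.data.offset at job setup), so the received
	 * CRC trails the hashed region at the offset computed above.
	 */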

	/* Verify CRC (at the end of the message) */
	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}

static inline void
verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
{
	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, digest, len) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}

static inline void
generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
		struct aesni_mb_session *sess)
{
	/* No extra copy needed */
	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
		return;

	/*
	 * A truncating copy is only needed for HMAC-style auth algos, where
	 * the full digest was generated in a temporary location.
	 */
	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
			sess->auth.req_digest_len);
}

/**
 * Process a completed job and return the crypto operation it processed.
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns the processed crypto operation.
 * - Returns NULL on an invalid job.
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = NULL;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	uint8_t is_docsis_sec = 0;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assuming at this point that if it's a security type op, that
		 * this is for DOCSIS
		 */
		is_docsis_sec = 1;
		sess = get_sec_session_private_data(op->sym->sec_session);
	} else
#endif
	{
		sess = get_sym_session_private_data(op->sym->session,
						cryptodev_driver_id);
	}

	if (unlikely(sess == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return op;
	}

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg == NULL_HASH)
				break;

			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
				if (job->hash_alg == AES_CCM ||
					(job->hash_alg == AES_GMAC &&
						sess->cipher.mode == GCM))
					verify_digest(job,
						op->sym->aead.digest.data,
						sess->auth.req_digest_len,
						&op->status);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
				else if (is_docsis_sec)
					verify_docsis_sec_crc(job,
						&op->status);
#endif
				else
					verify_digest(job,
						op->sym->auth.digest.data,
						sess->auth.req_digest_len,
						&op->status);
			} else
				generate_digest(job, op, sess);
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
				op->sym->session));
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}

static inline void
post_process_mb_sync_job(JOB_AES_HMAC *job)
{
	uint32_t *st;

	st = job->user_data;
	st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}
		if (processed_jobs == nb_ops)
			break;

		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
	}

	return processed_jobs;
}

static inline uint32_t
handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
{
	uint32_t i;

	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
		post_process_mb_sync_job(job);

	return i;
}

static inline uint32_t
flush_mb_sync_mgr(MB_MGR *mb_mgr)
{
	JOB_AES_HMAC *job;

	job = IMB_FLUSH_JOB(mb_mgr);
	return handle_completed_sync_jobs(job, mb_mgr);
}

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}

static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be the crypto operation data struct */
	job->user_data = op;

	return job;
}

static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	uint8_t digest_idx = qp->digest_idx;
	do {
		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		}

		/*
		 * Get the next operation to process from the ingress queue.
		 * There is no need to return the job to the MB_MGR
		 * if there are no more operations to process, since the MB_MGR
		 * can use that pointer again in subsequent get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			retval = set_sec_mb_job_params(job, qp, op,
						&digest_idx);
		else
#endif
			retval = set_mb_job_params(job, qp, op, &digest_idx);

		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(qp->mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
#endif
		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;
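
	/*
	 * If nothing completed in the loop above (the multi-buffer manager
	 * may hold jobs internally until enough have been queued), force a
	 * flush so in-flight jobs can still be returned to the caller.
	 */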
	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}

static MB_MGR *
alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
{
	MB_MGR *mb_mgr = alloc_mb_mgr(0);

	if (mb_mgr == NULL)
		return NULL;

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		init_mb_mgr_sse(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX:
		init_mb_mgr_avx(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX2:
		init_mb_mgr_avx2(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX512:
		init_mb_mgr_avx512(mb_mgr);
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		free_mb_mgr(mb_mgr);
		return NULL;
	}

	return mb_mgr;
}

static inline void
aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
{
	uint32_t i;

	for (i = 0; i != vec->num; ++i)
		vec->status[i] = err;
}

static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
	/* no multi-seg support with the current AESNI-MB PMD */
	if (sgl->num != 1)
		return ENOTSUP;
	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
		return EINVAL;
	return 0;
}

static inline JOB_AES_HMAC *
submit_sync_job(MB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
	return IMB_SUBMIT_JOB(mb_mgr);
#else
	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
}

static inline uint32_t
generate_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
	uint32_t i, k;

	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			memcpy(vec->digest[i], dgst[i], len);
			k++;
		}
	}

	return k;
}

static inline uint32_t
verify_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
{
	uint32_t i, k;

	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			if (memcmp(vec->digest[i], dgst[i], len) != 0)
				vec->status[i] = EBADMSG;
			else
				k++;
		}
	}

	return k;
}

uint32_t
aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
	struct rte_crypto_sym_vec *vec)
{
	int32_t ret;
	uint32_t i, j, k, len;
	void *buf;
	JOB_AES_HMAC *job;
	MB_MGR *mb_mgr;
	struct aesni_mb_private *priv;
	struct aesni_mb_session *s;
	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];

	s = get_sym_session_private_data(sess, dev->driver_id);
	if (s == NULL) {
		aesni_mb_fill_error_code(vec, EINVAL);
		return 0;
	}

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
	if (mb_mgr == NULL) {
		priv = dev->data->dev_private;
		mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
		if (mb_mgr == NULL) {
			aesni_mb_fill_error_code(vec, ENOMEM);
			return 0;
		}
		RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
	}

	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
		ret = check_crypto_sgl(sofs, vec->sgl + i);
		if (ret != 0) {
			vec->status[i] = ret;
			continue;
		}

		buf = vec->sgl[i].vec[0].base;
		len = vec->sgl[i].vec[0].len;

		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (job == NULL) {
			k += flush_mb_sync_mgr(mb_mgr);
			job = IMB_GET_NEXT_JOB(mb_mgr);
			RTE_ASSERT(job != NULL);
		}

		/* Submit job for processing */
		set_cpu_mb_job_params(job, s, sofs, buf, len,
			vec->iv[i], vec->aad[i], tmp_dgst[i],
			&vec->status[i]);
		job = submit_sync_job(mb_mgr);
		j++;

		/* handle completed jobs */
		k += handle_completed_sync_jobs(job, mb_mgr);
	}

	/* flush remaining jobs */
	while (k != j)
		k += flush_mb_sync_mgr(mb_mgr);

	/* finish processing for successful jobs: check/update digest */
	if (k != 0) {
		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
			k = verify_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
		else
			k = generate_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
	}

	return k;
}

static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static uint64_t
vec_mode_to_flags(enum aesni_mb_vector_mode mode)
{
	switch (mode) {
	case RTE_AESNI_MB_SSE:
		return RTE_CRYPTODEV_FF_CPU_SSE;
	case RTE_AESNI_MB_AVX:
		return RTE_CRYPTODEV_FF_CPU_AVX;
	case RTE_AESNI_MB_AVX2:
		return RTE_CRYPTODEV_FF_CPU_AVX2;
	case RTE_AESNI_MB_AVX512:
		return RTE_CRYPTODEV_FF_CPU_AVX512;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
		return 0;
	}
}

static int
cryptodev_aesni_mb_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;
	MB_MGR *mb_mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	struct rte_security_ctx *security_instance;
	security_instance = rte_malloc("aesni_mb_sec",
				sizeof(struct rte_security_ctx),
				RTE_CACHE_LINE_SIZE);
	if (security_instance == NULL) {
		AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
		rte_cryptodev_pmd_destroy(dev);
		return -ENOMEM;
	}

	security_instance->device = (void *)dev;
	security_instance->ops = rte_aesni_mb_pmd_sec_ops;
	security_instance->sess_cnt = 0;
	dev->security_ctx = security_instance;
	dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
#endif

	/* Check CPU for support for AES instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
	else
		AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");

	dev->feature_flags |= vec_mode_to_flags(vector_mode);

	mb_mgr = alloc_init_mb_mgr(vector_mode);
	if (mb_mgr == NULL) {
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		rte_free(dev->security_ctx);
		dev->security_ctx = NULL;
#endif
		rte_cryptodev_pmd_destroy(dev);
		return -ENOMEM;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mb_mgr = mb_mgr;

	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());

	return 0;
}

static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_mb_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name, *args;
	int retval;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
	if (retval) {
		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
				args);
		return -EINVAL;
	}

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}

static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	struct aesni_mb_private *internals;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);
	if (RTE_PER_LCORE(sync_mb_mgr)) {
		free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
		RTE_PER_LCORE(sync_mb_mgr) = NULL;
	}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
#endif

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(aesni_mb_logtype_driver, pmd.crypto.aesni_mb, NOTICE);