/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <intel-ipsec-mb.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_per_lcore.h>
#include <rte_ether.h>

#include "aesni_mb_pmd_private.h"

#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#define HMAC_MAX_BLOCK_SIZE 128

static uint8_t cryptodev_driver_id;
/*
 * Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
 * as we still use the JOB-based API even for synchronous processing.
 */
static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		const uint8_t *hkey, uint16_t hkey_len,
	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
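
/*
 * Illustrative note: the pads computed above are the HMAC inner/outer partial
 * hashes. For HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), hashing one
 * block of (K ^ ipad) and (K ^ opad) up front lets the multi-buffer library
 * resume from these mid-states for every packet instead of re-hashing the
 * padded key each time.
 */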
/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;

#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/* CCM requires hashing first and ciphering later */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
			return AESNI_MB_OP_AEAD_CIPHER_HASH;

		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
			return AESNI_MB_OP_AEAD_CIPHER_HASH;
		return AESNI_MB_OP_AEAD_HASH_CIPHER;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			return AESNI_MB_OP_AEAD_HASH_CIPHER;

	return AESNI_MB_OP_NOT_SUPPORTED;
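
/*
 * Example (for reference): a CIPHER xform chained to an AUTH xform maps to
 * AESNI_MB_OP_CIPHER_HASH, an AUTH xform chained to a CIPHER xform maps to
 * AESNI_MB_OP_HASH_CIPHER, and single-element chains map to the corresponding
 * *_ONLY operations.
 */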
is_aead_algo(JOB_HASH_ALG hash_alg, JOB_CIPHER_MODE cipher_mode)
#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || hash_alg == AES_CCM ||
		(hash_alg == AES_GMAC && cipher_mode == GCM));
#else
	return ((hash_alg == AES_GMAC && cipher_mode == GCM) ||
		hash_alg == AES_CCM);
#endif
/** Set session authentication parameters */
aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	hash_one_block_t hash_oneblock_fn = NULL;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	uint32_t auth_precompute = 1;

		sess->auth.algo = NULL_HASH;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");

	/* Set IV parameters */
	sess->auth_iv.offset = xform->auth.iv.offset;
	sess->auth_iv.length = xform->auth.iv.length;

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		sess->auth.algo = AES_CMAC;

		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes in earlier
		 * versions.
		 * If the size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
		else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
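
		/*
		 * Note: when gen_digest_len differs from req_digest_len (a
		 * requested CMAC digest shorter than 4 bytes), the full
		 * 16-byte MAC is produced in a temporary buffer and truncated
		 * to the requested size on job completion (see
		 * generate_digest()).
		 */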
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->cipher.direction = ENCRYPT;
			sess->chain_order = CIPHER_HASH;
		} else
			sess->cipher.direction = DECRYPT;

		sess->auth.algo = AES_GMAC;
		if (sess->auth.req_digest_len > get_digest_byte_length(AES_GMAC)) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		sess->auth.gen_digest_len = sess->auth.req_digest_len;
		sess->iv.length = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		switch (xform->auth.key.length) {
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			AESNI_MB_LOG(ERR, "Invalid authentication key length\n");
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;

		uint16_t zuc_eia3_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_ZUC_EIA3_BITLEN);
		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;

		uint16_t snow3g_uia2_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_SNOW3G_UIA2_BITLEN);
		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_snow3g_auth);
	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;

		uint16_t kasumi_f9_digest_len =
			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
					&sess->auth.pKeySched_kasumi_auth);
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
					xform->auth.key.data,
					xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->auth.algo = PLAIN_SHA1;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
					xform->auth.key.data,
					xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->auth.algo = PLAIN_SHA_224;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
					xform->auth.key.data,
					xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->auth.algo = PLAIN_SHA_256;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
					xform->auth.key.data,
					xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->auth.algo = PLAIN_SHA_384;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
					xform->auth.key.data,
					xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->auth.algo = PLAIN_SHA_512;
		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");

	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
	else
		sess->auth.gen_digest_len = sess->auth.req_digest_len;
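
	/*
	 * Worked example (illustrative): for SHA1-HMAC the truncated digest is
	 * 12 bytes and the full digest is 20 bytes. Requests for 12 or 20
	 * bytes are generated directly; a request for, say, 16 bytes generates
	 * the full 20-byte digest and generate_digest() copies out 16 bytes.
	 */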
	/* Plain SHA does not require precompute key */
	if (auth_precompute == 0)
		return 0;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	}
/** Set session cipher parameters */
aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	uint8_t is_docsis = 0;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;

		sess->cipher.mode = NULL_CIPHER;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = DES3;
#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.mode = ECB;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;
	/* Check key length and choose key expansion function for AES */
		switch (xform->cipher.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
	} else if (is_docsis) {
		switch (xform->cipher.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			AESNI_MB_LOG(ERR, "Invalid cipher key length");

		sess->cipher.key_length_in_bytes = 24;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
		sess->cipher.key_length_in_bytes = 16;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			16);
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
		sess->cipher.key_length_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
		sess->cipher.key_length_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
		if (xform->cipher.key.length != 8) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
			xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
			xform->cipher.key.data);
aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	/* Set digest sizes */
	sess->auth.req_digest_len = xform->aead.digest_length;
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = CCM;
		sess->auth.algo = AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");

		/* CCM digest sizes must be between 4 and 16 bytes and an even number */
		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
				sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
				(sess->auth.req_digest_len & 1) == 1) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = GCM;
		sess->auth.algo = AES_GMAC;

		switch (xform->aead.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");

		/* GCM digest size must be between 1 and 16 */
		if (sess->auth.req_digest_len == 0 ||
				sess->auth.req_digest_len > 16) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");

#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
		sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;

		if (xform->aead.key.length != 32) {
			AESNI_MB_LOG(ERR, "Invalid key length");
		sess->cipher.key_length_in_bytes = 32;
		memcpy(sess->cipher.expanded_aes_keys.encode,
			xform->aead.key.data, 32);
		if (sess->auth.req_digest_len != 16) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
/** Parse crypto xform chain and set private session parameters */
aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		cipher_xform = xform->next;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * The multi-buffer library operates in only two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * the chain order depends on the cipher operation: encryption
		 * is always the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;

		cipher_xform = xform;
	case AESNI_MB_OP_AEAD_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
	case AESNI_MB_OP_AEAD_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
	case AESNI_MB_OP_NOT_SUPPORTED:
		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");

	/* Default IV length = 0 */
	sess->auth_iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");

	ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
		AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/** Check DOCSIS security session configuration is valid */
check_docsis_sec_session(struct rte_security_session_conf *conf)
	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
	struct rte_security_docsis_xform *docsis = &conf->docsis;

	/* Downlink: CRC generate -> Cipher encrypt */
	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    crypto_sym->cipher.algo ==
				RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {
	/* Uplink: Cipher decrypt -> CRC verify */
	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		if (crypto_sym != NULL &&
		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    crypto_sym->cipher.algo ==
				RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
		    crypto_sym->next == NULL) {

/** Set DOCSIS security session auth (CRC) parameters */
aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
		struct rte_security_docsis_xform *xform)
		AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");

	/* Select CRC generate/verify */
	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");

	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
/**
 * Parse DOCSIS security session configuration and set private session
 * parameters
 */
aesni_mb_set_docsis_sec_session_parameters(
		__rte_unused struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf,
	struct rte_security_docsis_xform *docsis_xform;
	struct rte_crypto_sym_xform *cipher_xform;
	struct aesni_mb_session *aesni_sess = sess;
	struct aesni_mb_private *internals = dev->data->dev_private;

	ret = check_docsis_sec_session(conf);
		AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");

	switch (conf->docsis.direction) {
	case RTE_SECURITY_DOCSIS_UPLINK:
		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
		docsis_xform = &conf->docsis;
		cipher_xform = conf->crypto_xform;
	case RTE_SECURITY_DOCSIS_DOWNLINK:
		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
		cipher_xform = conf->crypto_xform;
		docsis_xform = &conf->docsis;

	/* Default IV length = 0 */
	aesni_sess->iv.length = 0;

	ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
		AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");

	ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
			aesni_sess, cipher_xform);
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
/**
 * Burst enqueue: place crypto operations on the ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;
/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_sym_session_private_data(
					cryptodev_driver_id);
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		if (likely(op->sym->sec_session != NULL))
			sess = (struct aesni_mb_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
		void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->chain_order != CIPHER_HASH)
		return op->sym->auth.data.offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;

	/*
	 * Copy the content between cipher offset and auth offset for
	 * generating the correct digest.
	 */
	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
		memcpy(p_dst + op->sym->auth.data.offset,
				p_src + op->sym->auth.data.offset,
				op->sym->cipher.data.offset -
				op->sym->auth.data.offset);

	/*
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating the correct digest.
	 */
	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/*
	 * Since intel-ipsec-mb only supports positive values,
	 * we need to compute the correct offset between src and dst.
	 */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
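
/*
 * Illustrative example: if u_src = 0x2000 and u_dst = 0x1800, the value
 * returned is UINT64_MAX - 0x2000 + 0x1800 + 1, i.e. the two's-complement
 * encoding of -0x800. When the library adds this unsigned offset to the
 * source pointer it wraps around to the destination address.
 */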
set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		job->u.CCM.aad = (uint8_t *)aad->va + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = aad->va;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			job->u.GCM.aad = buf;
			job->u.GCM.aad_len_in_bytes = len;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;

#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = aad->va;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
		job->u.HMAC._hashed_auth_key_xor_ipad =
				session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
				session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
					session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
					session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
					session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
					session->cipher.expanded_aes_keys.decode;
		}
	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest location and length */
	job->auth_tag_output = digest;
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
	if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
	} else {
		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
			sofs.ofs.auth.tail;
		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
			sofs.ofs.cipher.tail;
	}

	job->user_data = udata;
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param qp	queue pair
 * @param job	JOB_AES_HMAC structure to fill
 * @param op	crypto operation to process
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);

	switch (job->hash_alg) {
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = GCM;
		}
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_ZUC_EIA3_BITLEN:
		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._key = (void *) &session->auth.pKeySched_snow3g_auth;
		job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
	case IMB_AUTH_KASUMI_UIA1:
		job->u.KASUMI_UIA1._key = (void *) &session->auth.pKeySched_kasumi_auth;
#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
					session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
					session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
					session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
					session->cipher.expanded_aes_keys.decode;
		}
		m_offset = op->sym->aead.data.offset;
		m_offset = op->sym->cipher.data.offset;

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
		job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
		job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;

	if (!op->sym->m_dst) {
		/* in-place operation */
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		/* out-of-place operation */
		m_dst = op->sym->m_dst;

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
			job->auth_tag_output = op->sym->aead.digest.data;
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
			job->auth_tag_output = qp->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % MAX_JOBS;

	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset + 1);

		if (session->cipher.mode == GCM) {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes =
					op->sym->aead.data.length;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
			job->cipher_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->msg_len_to_cipher_in_bytes = 0;
			job->msg_len_to_hash_in_bytes = 0;
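			/*
			 * Note: for plain AES-GMAC (authentication only, no
			 * GCM cipher) the data to authenticate was already
			 * passed as GCM AAD in the hash-parameter switch
			 * above, so cipher and hash lengths stay at zero here.
			 */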
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);

#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
	case IMB_AUTH_CHACHA20_POLY1305:
		job->cipher_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes =
				op->sym->aead.data.length;
		job->msg_len_to_hash_in_bytes =
				op->sym->aead.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		/* For SNOW3G, length and offsets are already in bits */
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);

#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
		job->msg_len_to_cipher_in_bytes >>= 3;
	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
		job->msg_len_to_hash_in_bytes >>= 3;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/**
 * Process a crypto operation containing a security op and complete a
 * JOB_AES_HMAC job structure for submission to the multi buffer library for
 * processing.
 */
set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session;

	session = get_session(qp, op);
	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	/* Only DOCSIS protocol operations supported now */
	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	/* Set digest output location */
	job->auth_tag_output = qp->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % MAX_JOBS;

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
			sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
	uint16_t crc_offset;

	if (!job->msg_len_to_hash_in_bytes)
		return;

	crc_offset = job->hash_start_src_offset_in_bytes +
			job->msg_len_to_hash_in_bytes -
			job->cipher_start_src_offset_in_bytes;
	crc = job->dst + crc_offset;

	/* Verify CRC (at the end of the message) */
	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, digest, len) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
		struct aesni_mb_session *sess)
	/* No extra copy needed */
	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
		return;

	/*
	 * This can only happen for HMAC, so only the digest
	 * for authentication algos needs to be copied.
	 */
	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
			sess->auth.req_digest_len);
/**
 * Process a completed job and return the crypto operation it processed
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns processed crypto operation.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = NULL;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	uint8_t is_docsis_sec = 0;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		/*
		 * Assume at this point that if it is a security-type op, it
		 * is a DOCSIS op.
		 */
		sess = get_sec_session_private_data(op->sym->sec_session);
		sess = get_sym_session_private_data(op->sym->session,
				cryptodev_driver_id);

	if (unlikely(sess == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg == NULL_HASH)
				break;

			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
				if (is_aead_algo(job->hash_alg, sess->cipher.mode))
						op->sym->aead.digest.data,
						sess->auth.req_digest_len,
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
				else if (is_docsis_sec)
					verify_docsis_sec_crc(job,
						op->sym->auth.digest.data,
						sess->auth.req_digest_len,
				generate_digest(job, op, sess);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
post_process_mb_sync_job(JOB_AES_HMAC *job)
	st = job->user_data;
	st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
	struct rte_crypto_op *op = NULL;
	unsigned int processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
			qp->stats.dequeue_err_count++;

		if (processed_jobs == nb_ops)
			break;

		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
	}

	return processed_jobs;
static inline uint32_t
handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
		post_process_mb_sync_job(job);

static inline uint32_t
flush_mb_sync_mgr(MB_MGR *mb_mgr)
	job = IMB_FLUSH_JOB(mb_mgr);
	return handle_completed_sync_jobs(job, mb_mgr);

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);

		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;

static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	uint8_t digest_idx = qp->digest_idx;

		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			job = IMB_GET_NEXT_JOB(qp->mb_mgr);

		/*
		 * Get next operation to process from ingress queue.
		 * There is no need to return the job to the MB_MGR
		 * if there are no more operations to process, since the MB_MGR
		 * can use that pointer again in subsequent get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			retval = set_sec_mb_job_params(job, qp, op,
			retval = set_mb_job_params(job, qp, op, &digest_idx);

		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(qp->mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
#endif

		/*
		 * If submit returns a processed job then handle it
		 * before submitting subsequent jobs.
		 */
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
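
/*
 * Hypothetical application-side usage of the burst API registered by this PMD
 * (sketch only; dev_id, qp_id and the ops arrays are assumed to have been set
 * up with the standard rte_cryptodev configuration calls):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, n);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				deq_ops + done, sent - done);
 */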
alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
	MB_MGR *mb_mgr = alloc_mb_mgr(0);

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		init_mb_mgr_sse(mb_mgr);
	case RTE_AESNI_MB_AVX:
		init_mb_mgr_avx(mb_mgr);
	case RTE_AESNI_MB_AVX2:
		init_mb_mgr_avx2(mb_mgr);
	case RTE_AESNI_MB_AVX512:
		init_mb_mgr_avx512(mb_mgr);
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		free_mb_mgr(mb_mgr);

aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
	for (i = 0; i != vec->num; ++i)
		vec->status[i] = err;

check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
	/* no multi-seg support with current AESNI-MB PMD */
	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
static inline JOB_AES_HMAC *
submit_sync_job(MB_MGR *mb_mgr)
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
	return IMB_SUBMIT_JOB(mb_mgr);
#else
	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif

static inline uint32_t
generate_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			memcpy(vec->digest[i].va, dgst[i], len);

static inline uint32_t
verify_sync_dgst(struct rte_crypto_sym_vec *vec,
	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
	for (i = 0, k = 0; i != vec->num; i++) {
		if (vec->status[i] == 0) {
			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
				vec->status[i] = EBADMSG;
aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
	struct rte_crypto_sym_vec *vec)
	uint32_t i, j, k, len;
	struct aesni_mb_private *priv;
	struct aesni_mb_session *s;
	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];

	s = get_sym_session_private_data(sess, dev->driver_id);
		aesni_mb_fill_error_code(vec, EINVAL);

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
	if (mb_mgr == NULL) {
		priv = dev->data->dev_private;
		mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
		if (mb_mgr == NULL) {
			aesni_mb_fill_error_code(vec, ENOMEM);
		RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;

	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
		ret = check_crypto_sgl(sofs, vec->sgl + i);
			vec->status[i] = ret;

		buf = vec->sgl[i].vec[0].base;
		len = vec->sgl[i].vec[0].len;

		job = IMB_GET_NEXT_JOB(mb_mgr);
			k += flush_mb_sync_mgr(mb_mgr);
			job = IMB_GET_NEXT_JOB(mb_mgr);
			RTE_ASSERT(job != NULL);

		/* Submit job for processing */
		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
		job = submit_sync_job(mb_mgr);

		/* handle completed jobs */
		k += handle_completed_sync_jobs(job, mb_mgr);

	/* flush remaining jobs */
		k += flush_mb_sync_mgr(mb_mgr);

	/* finish processing for successful jobs: check/update digest */
		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
			k = verify_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
		else
			k = generate_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
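
/*
 * Hypothetical usage sketch of the synchronous CPU crypto path implemented
 * above (names other than the rte_ API calls are placeholders):
 *
 *	union rte_crypto_sym_ofs ofs = { 0 };
 *	uint32_t ok = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *			ofs, &vec);
 *
 * "ok" is the number of elements of vec processed successfully; per-element
 * errors are reported in vec.status[].
 */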
static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

vec_mode_to_flags(enum aesni_mb_vector_mode mode)
	case RTE_AESNI_MB_SSE:
		return RTE_CRYPTODEV_FF_CPU_SSE;
	case RTE_AESNI_MB_AVX:
		return RTE_CRYPTODEV_FF_CPU_AVX;
	case RTE_AESNI_MB_AVX2:
		return RTE_CRYPTODEV_FF_CPU_AVX2;
	case RTE_AESNI_MB_AVX512:
		return RTE_CRYPTODEV_FF_CPU_AVX512;
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
cryptodev_aesni_mb_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_cryptodev_pmd_init_params *init_params)
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	struct rte_security_ctx *security_instance;
	security_instance = rte_malloc("aesni_mb_sec",
				sizeof(struct rte_security_ctx),
				RTE_CACHE_LINE_SIZE);
	if (security_instance == NULL) {
		AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
		rte_cryptodev_pmd_destroy(dev);

	security_instance->device = (void *)dev;
	security_instance->ops = rte_aesni_mb_pmd_sec_ops;
	security_instance->sess_cnt = 0;
	dev->security_ctx = security_instance;
	dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;

	/* Check CPU support for the AES instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
	else
		AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");

	dev->feature_flags |= vec_mode_to_flags(vector_mode);

	mb_mgr = alloc_init_mb_mgr(vector_mode);
	if (mb_mgr == NULL) {
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		rte_free(dev->security_ctx);
		dev->security_ctx = NULL;
#endif
		rte_cryptodev_pmd_destroy(dev);

	/* Set the vector instruction mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mb_mgr = mb_mgr;

	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
	struct rte_cryptodev_pmd_init_params init_params = {
		sizeof(struct aesni_mb_private),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	const char *name, *args;

	name = rte_vdev_device_name(vdev);

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
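
/*
 * Example (illustrative): the virtual device can be instantiated from the EAL
 * command line, e.g.
 *   --vdev "crypto_aesni_mb,max_nb_queue_pairs=8"
 * using the parameters registered via RTE_PMD_REGISTER_PARAM_STRING below.
 */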
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
	struct rte_cryptodev *cryptodev;
	struct aesni_mb_private *internals;

	name = rte_vdev_device_name(vdev);

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);
	if (RTE_PER_LCORE(sync_mb_mgr)) {
		free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
		RTE_PER_LCORE(sync_mb_mgr) = NULL;
	}

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
#endif

	return rte_cryptodev_pmd_destroy(cryptodev);

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(aesni_mb_logtype_driver, pmd.crypto.aesni_mb, NOTICE);