1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2021 Intel Corporation
5 #include "pmd_aesni_mb_priv.h"
7 struct aesni_mb_op_buf_data {
13 * Calculate the authentication pre-computes
15 * @param one_block_hash Function pointer
16 * to calculate digest on ipad/opad
17 * @param ipad Inner pad output byte array
18 * @param opad Outer pad output byte array
19 * @param hkey Authentication key
20 * @param hkey_len Authentication key length
21 * @param blocksize Block size of selected hash algo
24 calculate_auth_precomputes(hash_one_block_t one_block_hash,
25 uint8_t *ipad, uint8_t *opad,
26 const uint8_t *hkey, uint16_t hkey_len,
/*
 * HMAC (RFC 2104) pre-compute: fill one block with the ipad/opad
 * constants, XOR in the key, and run a single hash block over each so
 * per-packet jobs can start from the partially hashed inner/outer state.
 * NOTE(review): the `blocksize` parameter and the `i`/`length` local
 * declarations sit on lines not visible in this chunk.
 */
31 uint8_t ipad_buf[blocksize] __rte_aligned(16);
32 uint8_t opad_buf[blocksize] __rte_aligned(16);
34 /* Setup inner and outer pads */
35 memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
36 memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
38 /* XOR hash key with inner and outer pads */
/* Only up to blocksize key bytes are mixed in; longer keys are expected
 * to have been reduced by the caller before reaching here. */
39 length = hkey_len > blocksize ? blocksize : hkey_len;
41 for (i = 0; i < length; i++) {
42 ipad_buf[i] ^= hkey[i];
43 opad_buf[i] ^= hkey[i];
46 /* Compute partial hashes */
47 (*one_block_hash)(ipad_buf, ipad);
48 (*one_block_hash)(opad_buf, opad);
/* Scrub key-derived material from the stack buffers.
 * NOTE(review): a plain memset on dying locals can be optimized away;
 * consider an explicit-zeroize helper — confirm against DPDK policy. */
51 memset(ipad_buf, 0, blocksize);
52 memset(opad_buf, 0, blocksize);
/*
 * Return non-zero when the hash/cipher pair denotes an AEAD transform:
 * CHACHA20-POLY1305, AES-CCM, or AES-GMAC combined with GCM cipher mode
 * (i.e. AES-GCM).  GMAC without IMB_CIPHER_GCM is plain authentication.
 */
56 is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
58 return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 ||
59 hash_alg == IMB_AUTH_AES_CCM ||
60 (hash_alg == IMB_AUTH_AES_GMAC &&
61 cipher_mode == IMB_CIPHER_GCM));
64 /** Set session authentication parameters */
/*
 * Translate the rte_crypto auth xform into intel-ipsec-mb session state:
 * algorithm selection, requested vs. generated digest sizes, key
 * expansion / schedules, and (for HMAC) the ipad/opad pre-computes.
 * NOTE(review): error returns, `break` statements and several closing
 * braces fall on lines elided from this chunk; the flow annotated below
 * assumes the usual return-on-error / return-after-branch structure —
 * confirm against the full file.
 */
66 aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
67 struct aesni_mb_session *sess,
68 const struct rte_crypto_sym_xform *xform)
70 hash_one_block_t hash_oneblock_fn = NULL;
71 unsigned int key_larger_block_size = 0;
72 uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
73 uint32_t auth_precompute = 1;
76 sess->auth.algo = IMB_AUTH_NULL;
80 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
81 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth");
85 /* Set IV parameters */
86 sess->auth_iv.offset = xform->auth.iv.offset;
87 sess->auth_iv.length = xform->auth.iv.length;
89 /* Set the request digest size */
90 sess->auth.req_digest_len = xform->auth.digest_length;
92 /* Select auth generate/verify */
93 sess->auth.operation = xform->auth.op;
95 /* Set Authentication Parameters */
96 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) {
/* NULL auth: no digest is produced or checked. */
97 sess->auth.algo = IMB_AUTH_NULL;
98 sess->auth.gen_digest_len = 0;
102 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
/* AES-XCBC: only the truncated digest size is accepted; expand the
 * key into K1 (round keys), K2 and K3. */
103 sess->auth.algo = IMB_AUTH_AES_XCBC;
105 uint16_t xcbc_mac_digest_len =
106 get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC);
107 if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
108 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
111 sess->auth.gen_digest_len = sess->auth.req_digest_len;
113 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
114 sess->auth.xcbc.k1_expanded,
115 sess->auth.xcbc.k2, sess->auth.xcbc.k3);
119 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
122 sess->auth.algo = IMB_AUTH_AES_CMAC;
124 uint16_t cmac_digest_len =
125 get_digest_byte_length(IMB_AUTH_AES_CMAC);
127 if (sess->auth.req_digest_len > cmac_digest_len) {
128 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
132 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
133 * in version 0.50 and sizes of 12 and 16 bytes,
135 * If size requested is different, generate the full digest
136 * (16 bytes) in a temporary location and then memcpy
137 * the requested number of bytes.
139 if (sess->auth.req_digest_len < 4)
140 sess->auth.gen_digest_len = cmac_digest_len;
142 sess->auth.gen_digest_len = sess->auth.req_digest_len;
/* NOTE(review): `dust` (discarded decrypt schedule) is declared on an
 * elided line. */
144 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
145 sess->auth.cmac.expkey, dust);
146 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
147 sess->auth.cmac.skey1, sess->auth.cmac.skey2);
151 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
/* AES-GMAC rides on the GCM cipher engine, so the *cipher* direction,
 * IV and GCM key schedule are programmed from the auth xform here. */
152 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
153 sess->cipher.direction = IMB_DIR_ENCRYPT;
154 sess->chain_order = IMB_ORDER_CIPHER_HASH;
156 sess->cipher.direction = IMB_DIR_DECRYPT;
158 sess->auth.algo = IMB_AUTH_AES_GMAC;
159 if (sess->auth.req_digest_len >
160 get_digest_byte_length(IMB_AUTH_AES_GMAC)) {
161 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
164 sess->auth.gen_digest_len = sess->auth.req_digest_len;
165 sess->iv.length = xform->auth.iv.length;
166 sess->iv.offset = xform->auth.iv.offset;
168 switch (xform->auth.key.length) {
169 case IMB_KEY_128_BYTES:
170 IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
171 &sess->cipher.gcm_key);
172 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
174 case IMB_KEY_192_BYTES:
175 IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
176 &sess->cipher.gcm_key);
177 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
179 case IMB_KEY_256_BYTES:
180 IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
181 &sess->cipher.gcm_key);
182 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
185 IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
192 if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
/* ZUC-EIA3: a 16-byte key selects ZUC-128, 32 bytes selects ZUC-256;
 * the raw key is copied and scheduled at job time. */
193 if (xform->auth.key.length == 16) {
194 sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
195 } else if (xform->auth.key.length == 32) {
196 sess->auth.algo = IMB_AUTH_ZUC256_EIA3_BITLEN;
198 IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
202 uint16_t zuc_eia3_digest_len =
203 get_truncated_digest_byte_length(
204 IMB_AUTH_ZUC_EIA3_BITLEN);
205 if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
206 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
209 sess->auth.gen_digest_len = sess->auth.req_digest_len;
211 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data,
212 xform->auth.key.length);
214 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
215 sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
216 uint16_t snow3g_uia2_digest_len =
217 get_truncated_digest_byte_length(
218 IMB_AUTH_SNOW3G_UIA2_BITLEN);
219 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
220 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
223 sess->auth.gen_digest_len = sess->auth.req_digest_len;
225 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
226 &sess->auth.pKeySched_snow3g_auth);
228 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
229 sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
230 uint16_t kasumi_f9_digest_len =
231 get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
232 if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
233 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
236 sess->auth.gen_digest_len = sess->auth.req_digest_len;
238 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
239 &sess->auth.pKeySched_kasumi_auth);
/*
 * Remaining algorithms: HMAC family plus plain SHA.  For HMAC, keys
 * longer than the hash block size are reduced into hashed_key first
 * (the reducing call and the hashed_key argument sit on elided lines —
 * presumably a one-shot SHA; verify in the full file) and
 * key_larger_block_size flags that the precompute must use it.
 * Plain SHA cases presumably clear auth_precompute on elided lines.
 */
243 switch (xform->auth.algo) {
244 case RTE_CRYPTO_AUTH_MD5_HMAC:
245 sess->auth.algo = IMB_AUTH_MD5;
246 hash_oneblock_fn = mb_mgr->md5_one_block;
248 case RTE_CRYPTO_AUTH_SHA1_HMAC:
249 sess->auth.algo = IMB_AUTH_HMAC_SHA_1;
250 hash_oneblock_fn = mb_mgr->sha1_one_block;
251 if (xform->auth.key.length > get_auth_algo_blocksize(
252 IMB_AUTH_HMAC_SHA_1)) {
254 xform->auth.key.data,
255 xform->auth.key.length,
257 key_larger_block_size = 1;
260 case RTE_CRYPTO_AUTH_SHA1:
261 sess->auth.algo = IMB_AUTH_SHA_1;
264 case RTE_CRYPTO_AUTH_SHA224_HMAC:
265 sess->auth.algo = IMB_AUTH_HMAC_SHA_224;
266 hash_oneblock_fn = mb_mgr->sha224_one_block;
267 if (xform->auth.key.length > get_auth_algo_blocksize(
268 IMB_AUTH_HMAC_SHA_224)) {
270 xform->auth.key.data,
271 xform->auth.key.length,
273 key_larger_block_size = 1;
276 case RTE_CRYPTO_AUTH_SHA224:
277 sess->auth.algo = IMB_AUTH_SHA_224;
280 case RTE_CRYPTO_AUTH_SHA256_HMAC:
281 sess->auth.algo = IMB_AUTH_HMAC_SHA_256;
282 hash_oneblock_fn = mb_mgr->sha256_one_block;
283 if (xform->auth.key.length > get_auth_algo_blocksize(
284 IMB_AUTH_HMAC_SHA_256)) {
286 xform->auth.key.data,
287 xform->auth.key.length,
289 key_larger_block_size = 1;
292 case RTE_CRYPTO_AUTH_SHA256:
293 sess->auth.algo = IMB_AUTH_SHA_256;
296 case RTE_CRYPTO_AUTH_SHA384_HMAC:
297 sess->auth.algo = IMB_AUTH_HMAC_SHA_384;
298 hash_oneblock_fn = mb_mgr->sha384_one_block;
299 if (xform->auth.key.length > get_auth_algo_blocksize(
300 IMB_AUTH_HMAC_SHA_384)) {
302 xform->auth.key.data,
303 xform->auth.key.length,
305 key_larger_block_size = 1;
308 case RTE_CRYPTO_AUTH_SHA384:
309 sess->auth.algo = IMB_AUTH_SHA_384;
312 case RTE_CRYPTO_AUTH_SHA512_HMAC:
313 sess->auth.algo = IMB_AUTH_HMAC_SHA_512;
314 hash_oneblock_fn = mb_mgr->sha512_one_block;
315 if (xform->auth.key.length > get_auth_algo_blocksize(
316 IMB_AUTH_HMAC_SHA_512)) {
318 xform->auth.key.data,
319 xform->auth.key.length,
321 key_larger_block_size = 1;
324 case RTE_CRYPTO_AUTH_SHA512:
325 sess->auth.algo = IMB_AUTH_SHA_512;
330 "Unsupported authentication algorithm selection");
/* Validate the requested digest length: anything between the truncated
 * and the full size is generated at full length and truncated when the
 * digest is copied out. */
333 uint16_t trunc_digest_size =
334 get_truncated_digest_byte_length(sess->auth.algo);
335 uint16_t full_digest_size =
336 get_digest_byte_length(sess->auth.algo);
338 if (sess->auth.req_digest_len > full_digest_size ||
339 sess->auth.req_digest_len == 0) {
340 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
344 if (sess->auth.req_digest_len != trunc_digest_size &&
345 sess->auth.req_digest_len != full_digest_size)
346 sess->auth.gen_digest_len = full_digest_size;
348 sess->auth.gen_digest_len = sess->auth.req_digest_len;
350 /* Plain SHA does not require precompute key */
351 if (auth_precompute == 0)
354 /* Calculate Authentication precomputes */
355 if (key_larger_block_size) {
/* Key exceeded the block size: precompute from the reduced key
 * (presumably hashed_key — the argument line is elided). */
356 calculate_auth_precomputes(hash_oneblock_fn,
357 sess->auth.pads.inner, sess->auth.pads.outer,
359 xform->auth.key.length,
360 get_auth_algo_blocksize(sess->auth.algo));
362 calculate_auth_precomputes(hash_oneblock_fn,
363 sess->auth.pads.inner, sess->auth.pads.outer,
364 xform->auth.key.data,
365 xform->auth.key.length,
366 get_auth_algo_blocksize(sess->auth.algo));
372 /** Set session cipher parameters */
/*
 * Translate the rte_crypto cipher xform into intel-ipsec-mb session
 * state: cipher mode, direction, IV placement and expanded keys.
 * NOTE(review): the `is_aes`/`is_3DES` flag declarations, the flag
 * assignments inside the mode switch, `break`s and error returns fall
 * on elided lines; annotations below assume that structure — confirm
 * against the full file.
 */
374 aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr,
375 struct aesni_mb_session *sess,
376 const struct rte_crypto_sym_xform *xform)
380 uint8_t is_docsis = 0;
382 uint8_t is_snow3g = 0;
383 uint8_t is_kasumi = 0;
386 sess->cipher.mode = IMB_CIPHER_NULL;
390 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
391 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher");
395 /* Select cipher direction */
396 switch (xform->cipher.op) {
397 case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
398 sess->cipher.direction = IMB_DIR_ENCRYPT;
400 case RTE_CRYPTO_CIPHER_OP_DECRYPT:
401 sess->cipher.direction = IMB_DIR_DECRYPT;
404 IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter");
408 /* Select cipher mode */
/* Each case also raises the matching is_* algorithm-family flag on an
 * elided line, which drives the key-expansion branches below. */
409 switch (xform->cipher.algo) {
410 case RTE_CRYPTO_CIPHER_AES_CBC:
411 sess->cipher.mode = IMB_CIPHER_CBC;
414 case RTE_CRYPTO_CIPHER_AES_CTR:
415 sess->cipher.mode = IMB_CIPHER_CNTR;
418 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
419 sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
422 case RTE_CRYPTO_CIPHER_DES_CBC:
423 sess->cipher.mode = IMB_CIPHER_DES;
425 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
426 sess->cipher.mode = IMB_CIPHER_DOCSIS_DES;
428 case RTE_CRYPTO_CIPHER_3DES_CBC:
429 sess->cipher.mode = IMB_CIPHER_DES3;
432 case RTE_CRYPTO_CIPHER_AES_ECB:
433 sess->cipher.mode = IMB_CIPHER_ECB;
436 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
437 sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
440 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
441 sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
444 case RTE_CRYPTO_CIPHER_KASUMI_F8:
445 sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
448 case RTE_CRYPTO_CIPHER_NULL:
/* NULL cipher completes the IV setup here and returns early on an
 * elided line (no key material needed). */
449 sess->cipher.mode = IMB_CIPHER_NULL;
450 sess->cipher.key_length_in_bytes = 0;
451 sess->iv.offset = xform->cipher.iv.offset;
452 sess->iv.length = xform->cipher.iv.length;
455 IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter");
459 /* Set IV parameters */
460 sess->iv.offset = xform->cipher.iv.offset;
461 sess->iv.length = xform->cipher.iv.length;
463 /* Check key length and choose key expansion function for AES */
/* NOTE(review): the `if (is_aes)` guard for this first switch sits on
 * an elided line. */
465 switch (xform->cipher.key.length) {
466 case IMB_KEY_128_BYTES:
467 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
468 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
469 sess->cipher.expanded_aes_keys.encode,
470 sess->cipher.expanded_aes_keys.decode);
472 case IMB_KEY_192_BYTES:
473 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
474 IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
475 sess->cipher.expanded_aes_keys.encode,
476 sess->cipher.expanded_aes_keys.decode);
478 case IMB_KEY_256_BYTES:
479 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
480 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
481 sess->cipher.expanded_aes_keys.encode,
482 sess->cipher.expanded_aes_keys.decode);
485 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
488 } else if (is_docsis) {
/* DOCSIS BPI: only 128- and 256-bit AES keys are valid. */
489 switch (xform->cipher.key.length) {
490 case IMB_KEY_128_BYTES:
491 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
492 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
493 sess->cipher.expanded_aes_keys.encode,
494 sess->cipher.expanded_aes_keys.decode);
496 case IMB_KEY_256_BYTES:
497 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
498 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
499 sess->cipher.expanded_aes_keys.encode,
500 sess->cipher.expanded_aes_keys.decode);
503 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
506 } else if (is_3DES) {
/* 3DES: build up to three DES key schedules, then publish them via
 * ks_ptr[] following the keying-option aliasing rules below. */
507 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
508 sess->cipher.exp_3des_keys.key[1],
509 sess->cipher.exp_3des_keys.key[2]};
511 switch (xform->cipher.key.length) {
513 IMB_DES_KEYSCHED(mb_mgr, keys[0],
514 xform->cipher.key.data);
515 IMB_DES_KEYSCHED(mb_mgr, keys[1],
516 xform->cipher.key.data + 8);
517 IMB_DES_KEYSCHED(mb_mgr, keys[2],
518 xform->cipher.key.data + 16);
520 /* Initialize keys - 24 bytes: [K1-K2-K3] */
521 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
522 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
523 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
526 IMB_DES_KEYSCHED(mb_mgr, keys[0],
527 xform->cipher.key.data);
528 IMB_DES_KEYSCHED(mb_mgr, keys[1],
529 xform->cipher.key.data + 8);
530 /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
531 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
532 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
533 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
536 IMB_DES_KEYSCHED(mb_mgr, keys[0],
537 xform->cipher.key.data);
539 /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
540 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
541 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
542 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
545 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
/* intel-ipsec-mb expects the 3DES key length reported as 24 bytes
 * regardless of the keying option used. */
549 sess->cipher.key_length_in_bytes = 24;
551 if (xform->cipher.key.length != 16 &&
552 xform->cipher.key.length != 32) {
553 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
556 sess->cipher.key_length_in_bytes = xform->cipher.key.length;
557 memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
558 xform->cipher.key.length);
559 } else if (is_snow3g) {
560 if (xform->cipher.key.length != 16) {
561 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
564 sess->cipher.key_length_in_bytes = 16;
565 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
566 &sess->cipher.pKeySched_snow3g_cipher);
567 } else if (is_kasumi) {
568 if (xform->cipher.key.length != 16) {
569 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
572 sess->cipher.key_length_in_bytes = 16;
573 IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
574 &sess->cipher.pKeySched_kasumi_cipher);
576 if (xform->cipher.key.length != 8) {
577 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
580 sess->cipher.key_length_in_bytes = 8;
/* Single DES: the schedules are stored in the expanded_aes_keys
 * union members (same storage, DES-typed via the casts). */
582 IMB_DES_KEYSCHED(mb_mgr,
583 (uint64_t *)sess->cipher.expanded_aes_keys.encode,
584 xform->cipher.key.data);
585 IMB_DES_KEYSCHED(mb_mgr,
586 (uint64_t *)sess->cipher.expanded_aes_keys.decode,
587 xform->cipher.key.data);
/*
 * Set session AEAD parameters (AES-CCM, AES-GCM, CHACHA20-POLY1305):
 * direction, IV, digest sizes and key expansion from the aead xform.
 * NOTE(review): `break`s, error returns and closing braces fall on
 * elided lines.
 */
594 aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
595 struct aesni_mb_session *sess,
596 const struct rte_crypto_sym_xform *xform)
598 switch (xform->aead.op) {
599 case RTE_CRYPTO_AEAD_OP_ENCRYPT:
600 sess->cipher.direction = IMB_DIR_ENCRYPT;
601 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
603 case RTE_CRYPTO_AEAD_OP_DECRYPT:
604 sess->cipher.direction = IMB_DIR_DECRYPT;
605 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
608 IPSEC_MB_LOG(ERR, "Invalid aead operation parameter");
612 /* Set IV parameters */
613 sess->iv.offset = xform->aead.iv.offset;
614 sess->iv.length = xform->aead.iv.length;
616 /* Set digest sizes */
617 sess->auth.req_digest_len = xform->aead.digest_length;
618 sess->auth.gen_digest_len = sess->auth.req_digest_len;
620 switch (xform->aead.algo) {
621 case RTE_CRYPTO_AEAD_AES_CCM:
622 sess->cipher.mode = IMB_CIPHER_CCM;
623 sess->auth.algo = IMB_AUTH_AES_CCM;
625 /* Check key length and choose key expansion function for AES */
626 switch (xform->aead.key.length) {
627 case IMB_KEY_128_BYTES:
628 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
629 IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
630 sess->cipher.expanded_aes_keys.encode,
631 sess->cipher.expanded_aes_keys.decode);
633 case IMB_KEY_256_BYTES:
634 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
635 IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
636 sess->cipher.expanded_aes_keys.encode,
637 sess->cipher.expanded_aes_keys.decode);
640 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
644 /* CCM digests must be between 4 and 16 and an even number */
645 if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
646 sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
647 (sess->auth.req_digest_len & 1) == 1) {
648 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
653 case RTE_CRYPTO_AEAD_AES_GCM:
654 sess->cipher.mode = IMB_CIPHER_GCM;
655 sess->auth.algo = IMB_AUTH_AES_GMAC;
/* GCM precomputes the hash key into gcm_key per AES key size. */
657 switch (xform->aead.key.length) {
658 case IMB_KEY_128_BYTES:
659 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
660 IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
661 &sess->cipher.gcm_key);
663 case IMB_KEY_192_BYTES:
664 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
665 IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
666 &sess->cipher.gcm_key);
668 case IMB_KEY_256_BYTES:
669 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
670 IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
671 &sess->cipher.gcm_key);
674 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
678 /* GCM digest size must be between 1 and 16 */
679 if (sess->auth.req_digest_len == 0 ||
680 sess->auth.req_digest_len > 16) {
681 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
686 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
687 sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
688 sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;
690 if (xform->aead.key.length != 32) {
691 IPSEC_MB_LOG(ERR, "Invalid key length");
694 sess->cipher.key_length_in_bytes = 32;
/* Raw 256-bit ChaCha20 key, stored in the expanded-keys union. */
695 memcpy(sess->cipher.expanded_aes_keys.encode,
696 xform->aead.key.data, 32);
697 if (sess->auth.req_digest_len != 16) {
698 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
703 IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter");
710 /** Configure a aesni multi-buffer session from a crypto xform chain */
/*
 * Parse the xform chain into auth/cipher/aead components, derive the
 * multi-buffer chain order from the operation mode, then delegate to
 * the per-component session setup helpers above.
 * NOTE(review): the `priv_sess` parameter, `ret` declaration, `switch
 * (mode)` line, `break`s and the error-return bodies are on elided
 * lines.
 */
712 aesni_mb_session_configure(IMB_MGR *mb_mgr,
714 const struct rte_crypto_sym_xform *xform)
716 const struct rte_crypto_sym_xform *auth_xform = NULL;
717 const struct rte_crypto_sym_xform *cipher_xform = NULL;
718 const struct rte_crypto_sym_xform *aead_xform = NULL;
719 enum ipsec_mb_operation mode;
720 struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess;
723 ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
724 &cipher_xform, &aead_xform);
728 /* Select Crypto operation - hash then cipher / cipher then hash */
730 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
731 sess->chain_order = IMB_ORDER_HASH_CIPHER;
733 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
734 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
735 sess->chain_order = IMB_ORDER_CIPHER_HASH;
737 case IPSEC_MB_OP_HASH_GEN_ONLY:
738 case IPSEC_MB_OP_HASH_VERIFY_ONLY:
739 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
740 sess->chain_order = IMB_ORDER_HASH_CIPHER;
743 * Multi buffer library operates only at two modes,
744 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER.
745 * When doing ciphering only, chain order depends
746 * on cipher operation: encryption is always
747 * the first operation and decryption the last one.
749 case IPSEC_MB_OP_ENCRYPT_ONLY:
750 sess->chain_order = IMB_ORDER_CIPHER_HASH;
752 case IPSEC_MB_OP_DECRYPT_ONLY:
753 sess->chain_order = IMB_ORDER_HASH_CIPHER;
755 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
756 sess->chain_order = IMB_ORDER_CIPHER_HASH;
757 sess->aead.aad_len = xform->aead.aad_length;
759 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
760 sess->chain_order = IMB_ORDER_HASH_CIPHER;
761 sess->aead.aad_len = xform->aead.aad_length;
763 case IPSEC_MB_OP_NOT_SUPPORTED:
766 "Unsupported operation chain order parameter");
770 /* Default IV length = 0 */
772 sess->auth_iv.length = 0;
774 ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
777 "Invalid/unsupported authentication parameters");
781 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
784 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
789 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
793 "Invalid/unsupported aead parameters");
801 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
802 /** Check DOCSIS security session configuration is valid */
/*
 * Validate a DOCSIS security session: exactly one AES-DOCSISBPI cipher
 * xform (128/256-bit key, AES-block-size IV, no chained xform) whose
 * encrypt/decrypt direction matches the DOCSIS downlink/uplink
 * direction.  Success/failure return statements are on elided lines.
 */
804 check_docsis_sec_session(struct rte_security_session_conf *conf)
806 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
807 struct rte_security_docsis_xform *docsis = &conf->docsis;
809 /* Downlink: CRC generate -> Cipher encrypt */
810 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
812 if (crypto_sym != NULL &&
813 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
814 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
815 crypto_sym->cipher.algo ==
816 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
817 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
818 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
819 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
820 crypto_sym->next == NULL) {
823 /* Uplink: Cipher decrypt -> CRC verify */
824 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
826 if (crypto_sym != NULL &&
827 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
828 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
829 crypto_sym->cipher.algo ==
830 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
831 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
832 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
833 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
834 crypto_sym->next == NULL) {
842 /** Set DOCSIS security session auth (CRC) parameters */
/*
 * Program the Ethernet CRC32 "auth" step: verify on uplink, generate on
 * downlink; the digest is always the 4-byte Ethernet FCS.
 * NOTE(review): the NULL-xform guard condition and error returns are on
 * elided lines.
 */
844 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
845 struct rte_security_docsis_xform *xform)
848 IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform");
852 /* Select CRC generate/verify */
853 if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
854 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
855 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
856 } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
857 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
858 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
860 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction");
864 sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
865 sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
871 * Parse DOCSIS security session configuration and set private session
/*
 * Build a DOCSIS security session: validate the configuration, pick the
 * chain order from the direction (uplink: decrypt then CRC-verify;
 * downlink: CRC-generate then encrypt), then set CRC and cipher params.
 * NOTE(review): allocates a private IMB_MGR via alloc_init_mb_mgr();
 * its NULL check, free path and the function's return statements are on
 * elided lines — verify the manager is released on all paths.
 */
875 aesni_mb_set_docsis_sec_session_parameters(
876 __rte_unused struct rte_cryptodev *dev,
877 struct rte_security_session_conf *conf,
880 IMB_MGR *mb_mgr = alloc_init_mb_mgr();
881 struct rte_security_docsis_xform *docsis_xform;
882 struct rte_crypto_sym_xform *cipher_xform;
883 struct aesni_mb_session *ipsec_sess = sess;
889 ret = check_docsis_sec_session(conf);
891 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
895 switch (conf->docsis.direction) {
896 case RTE_SECURITY_DOCSIS_UPLINK:
897 ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH;
898 docsis_xform = &conf->docsis;
899 cipher_xform = conf->crypto_xform;
901 case RTE_SECURITY_DOCSIS_DOWNLINK:
902 ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER;
903 cipher_xform = conf->crypto_xform;
904 docsis_xform = &conf->docsis;
907 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
912 /* Default IV length = 0 */
913 ipsec_sess->iv.length = 0;
915 ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess,
918 IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
922 ret = aesni_mb_set_session_cipher_parameters(mb_mgr,
923 ipsec_sess, cipher_xform);
926 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
/*
 * Compute the hash-source offset for out-of-place cipher-then-hash: the
 * multi-buffer library hashes from the *destination* buffer, so the
 * plaintext regions outside the ciphered range are copied from src to
 * dst first, and the src->dst distance is returned as an unsigned
 * offset (two's-complement encoded when dst precedes src, since
 * intel-ipsec-mb only accepts positive offsets).
 * NOTE(review): the in-place/HASH_CIPHER early "return 0" and parts of
 * the first memcpy argument list are on elided lines.
 */
936 static inline uint64_t
937 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
938 uint32_t oop, const uint32_t auth_offset,
939 const uint32_t cipher_offset, const uint32_t auth_length,
940 const uint32_t cipher_length)
942 struct rte_mbuf *m_src, *m_dst;
943 uint8_t *p_src, *p_dst;
944 uintptr_t u_src, u_dst;
945 uint32_t cipher_end, auth_end;
947 /* Only cipher then hash needs special calculation. */
948 if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH)
951 m_src = op->sym->m_src;
952 m_dst = op->sym->m_dst;
954 p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
955 p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
956 u_src = (uintptr_t)p_src;
957 u_dst = (uintptr_t)p_dst + auth_offset;
960 * Copy the content between cipher offset and auth offset for generating
963 if (cipher_offset > auth_offset)
964 memcpy(p_dst + auth_offset,
970 * Copy the content between (cipher offset + length) and (auth offset +
971 * length) for generating correct digest
973 cipher_end = cipher_offset + cipher_length;
974 auth_end = auth_offset + auth_length;
975 if (cipher_end < auth_end)
976 memcpy(p_dst + cipher_end, p_src + cipher_end,
977 auth_end - cipher_end);
980 * Since intel-ipsec-mb only supports positive values,
981 * we need to deduct the correct offset between src and dst.
984 return u_src < u_dst ? (u_dst - u_src) :
985 (UINT64_MAX - u_src + u_dst + 1);
/*
 * Fill an IMB_JOB for the CPU (synchronous, raw-buffer) datapath from a
 * configured session: chain order, cipher/auth selection, per-algorithm
 * key material, AAD, digest location and data offsets/lengths.
 * NOTE(review): `break`s, the default: label, some IV/src assignments
 * and the trailing statements are on elided lines.
 */
989 set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
990 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
991 struct rte_crypto_va_iova_ptr *iv,
992 struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
994 /* Set crypto operation */
995 job->chain_order = session->chain_order;
997 /* Set cipher parameters */
998 job->cipher_direction = session->cipher.direction;
999 job->cipher_mode = session->cipher.mode;
1001 job->key_len_in_bytes = session->cipher.key_length_in_bytes;
1003 /* Set authentication parameters */
1004 job->hash_alg = session->auth.algo;
1007 switch (job->hash_alg) {
1008 case IMB_AUTH_AES_XCBC:
1009 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1010 job->u.XCBC._k2 = session->auth.xcbc.k2;
1011 job->u.XCBC._k3 = session->auth.xcbc.k3;
1013 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1014 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1017 case IMB_AUTH_AES_CCM:
/* CCM: AAD pointer is advanced 18 bytes past the rte_crypto AAD
 * start — NOTE(review): presumably to skip the CCM B0/flags prefix
 * the API reserves; confirm against the CCM AAD layout. */
1018 job->u.CCM.aad = (uint8_t *)aad->va + 18;
1019 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1020 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1021 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1025 case IMB_AUTH_AES_CMAC:
1026 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1027 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1028 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1029 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1030 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1033 case IMB_AUTH_AES_GMAC:
1034 if (session->cipher.mode == IMB_CIPHER_GCM) {
/* AES-GCM: AAD comes from the op's AAD buffer. */
1035 job->u.GCM.aad = aad->va;
1036 job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
/* Plain GMAC: the whole payload is treated as AAD and the GCM
 * engine is used with zero-length cipher/hash messages (set
 * further down). */
1039 job->u.GCM.aad = buf;
1040 job->u.GCM.aad_len_in_bytes = len;
1041 job->cipher_mode = IMB_CIPHER_GCM;
1043 job->enc_keys = &session->cipher.gcm_key;
1044 job->dec_keys = &session->cipher.gcm_key;
1047 case IMB_AUTH_CHACHA20_POLY1305:
1048 job->u.CHACHA20_POLY1305.aad = aad->va;
1049 job->u.CHACHA20_POLY1305.aad_len_in_bytes =
1050 session->aead.aad_len;
1051 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1052 job->dec_keys = session->cipher.expanded_aes_keys.encode;
/* Default: HMAC family — point at the precomputed ipad/opad state. */
1055 job->u.HMAC._hashed_auth_key_xor_ipad =
1056 session->auth.pads.inner;
1057 job->u.HMAC._hashed_auth_key_xor_opad =
1058 session->auth.pads.outer;
1060 if (job->cipher_mode == IMB_CIPHER_DES3) {
1061 job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
1062 job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
1064 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1065 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1070 * Multi-buffer library current only support returning a truncated
1071 * digest length as specified in the relevant IPsec RFCs
1074 /* Set digest location and length */
1075 job->auth_tag_output = digest;
1076 job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1078 /* Set IV parameters */
1079 job->iv_len_in_bytes = session->iv.length;
1081 /* Data Parameters */
1083 job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
1084 job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
1085 job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
1086 if (job->hash_alg == IMB_AUTH_AES_GMAC &&
1087 session->cipher.mode != IMB_CIPHER_GCM) {
/* GMAC-only: payload already passed as AAD above. */
1088 job->msg_len_to_hash_in_bytes = 0;
1089 job->msg_len_to_cipher_in_bytes = 0;
1091 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
1093 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
1094 sofs.ofs.cipher.tail;
1097 job->user_data = udata;
/*
 * Submit one SGL segment of an AEAD job: pick the largest chunk that is
 * contiguous in both the source and (if out-of-place) destination
 * segment, submit it with IMB_SGL_UPDATE state, and advance the
 * src/dst cursors.  Marks the job IMB_SGL_COMPLETE once total_len
 * reaches zero.  Return statements are on elided lines; the visible
 * code suggests an error/progress integer return — confirm.
 */
1101 handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
1102 uint32_t *total_len,
1103 struct aesni_mb_op_buf_data *src_data,
1104 struct aesni_mb_op_buf_data *dst_data)
1106 uint32_t data_len, part_len;
1108 if (*total_len == 0) {
1109 job->sgl_state = IMB_SGL_COMPLETE;
1113 if (src_data->m == NULL) {
1114 IPSEC_MB_LOG(ERR, "Invalid source buffer");
1118 job->sgl_state = IMB_SGL_UPDATE;
1120 data_len = src_data->m->data_len - src_data->offset;
1122 job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
1125 if (dst_data->m != NULL) {
/* Out-of-place: skip exhausted destination segments, then clamp the
 * chunk to what fits in the current destination segment. */
1126 if (dst_data->m->data_len - dst_data->offset == 0) {
1127 dst_data->m = dst_data->m->next;
1128 if (dst_data->m == NULL) {
1129 IPSEC_MB_LOG(ERR, "Invalid destination buffer");
1132 dst_data->offset = 0;
1134 part_len = RTE_MIN(data_len, (dst_data->m->data_len -
1136 job->dst = rte_pktmbuf_mtod_offset(dst_data->m,
1137 uint8_t *, dst_data->offset);
1138 dst_data->offset += part_len;
/* In-place: write back into the source segment. */
1140 part_len = RTE_MIN(data_len, *total_len);
1141 job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
1145 job->msg_len_to_cipher_in_bytes = part_len;
1146 job->msg_len_to_hash_in_bytes = part_len;
1148 job = IMB_SUBMIT_JOB(mb_mgr);
1150 *total_len -= part_len;
/* Advance the source cursor: stay in this segment if bytes remain,
 * otherwise move to the next one. */
1152 if (part_len != data_len) {
1153 src_data->offset += part_len;
1155 src_data->m = src_data->m->next;
1156 src_data->offset = 0;
1164 * Process a crypto operation and complete a IMB_JOB job structure for
1165 * submission to the multi buffer library for processing.
1167 * @param qp queue pair
1168 * @param job IMB_JOB structure to fill
1169 * @param op crypto op to process
1170 * @param digest_idx ID for digest to use
1173 * - 0 on success, the IMB_JOB will be filled
1174 * - -1 if invalid session, IMB_JOB will not be filled
1177 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
1178 struct rte_crypto_op *op, uint8_t *digest_idx,
/* Per-op working state: source/destination mbufs, per-qp scratch data, and
 * SGL cursors that are only used on the GCM/CHACHA20-POLY1305 multi-segment
 * path at the bottom of this function.
 */
1181 struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
1182 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
1183 struct aesni_mb_op_buf_data src_sgl = {0};
1184 struct aesni_mb_op_buf_data dst_sgl = {0};
1185 struct aesni_mb_session *session;
1186 uint32_t m_offset, oop;
1187 uint32_t auth_off_in_bytes;
1188 uint32_t ciph_off_in_bytes;
1189 uint32_t auth_len_in_bytes;
1190 uint32_t ciph_len_in_bytes;
/* Resolve the session attached to this op; fail the op if there is none */
1196 session = ipsec_mb_get_session_private(qp, op);
1197 if (session == NULL) {
1198 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
/* Multi-segment source mbufs are only supported by the AEAD SGL path
 * (AES-GCM and CHACHA20-POLY1305); reject everything else up front.
 */
1202 if (op->sym->m_src->nb_segs > 1) {
1203 if (session->cipher.mode != IMB_CIPHER_GCM
1204 && session->cipher.mode !=
1205 IMB_CIPHER_CHACHA20_POLY1305) {
1206 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1207 IPSEC_MB_LOG(ERR, "Device only supports SGL for AES-GCM"
1208 " or CHACHA20_POLY1305 algorithms.");
/* Copy the session's precomputed parameters into the job */
1214 /* Set crypto operation */
1215 job->chain_order = session->chain_order;
1217 /* Set cipher parameters */
1218 job->cipher_direction = session->cipher.direction;
1219 job->cipher_mode = session->cipher.mode;
1221 job->key_len_in_bytes = session->cipher.key_length_in_bytes;
1223 /* Set authentication parameters */
1224 job->hash_alg = session->auth.algo;
/* AEAD algos take offsets/lengths from op->sym->aead below; non-AEAD from
 * op->sym->cipher / op->sym->auth.
 */
1226 const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);
/* 3DES keeps its key schedule separately from the AES expanded keys */
1228 if (job->cipher_mode == IMB_CIPHER_DES3) {
1229 job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
1230 job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
1232 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1233 job->dec_keys = session->cipher.expanded_aes_keys.decode;
/* Per-algorithm auth key material and AAD setup */
1236 switch (job->hash_alg) {
1237 case IMB_AUTH_AES_XCBC:
1238 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1239 job->u.XCBC._k2 = session->auth.xcbc.k2;
1240 job->u.XCBC._k3 = session->auth.xcbc.k3;
1242 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1243 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1246 case IMB_AUTH_AES_CCM:
/* NOTE(review): the AAD pointer is advanced 18 bytes into the buffer —
 * presumably past the CCM B0/flags prefix built at enqueue time; confirm
 * against the enqueue-side AAD layout.
 */
1247 job->u.CCM.aad = op->sym->aead.aad.data + 18;
1248 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1249 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1250 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1253 case IMB_AUTH_AES_CMAC:
1254 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1255 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1256 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1257 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1258 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1261 case IMB_AUTH_AES_GMAC:
/* GCM proper vs standalone GMAC: GCM takes AAD from the aead op and may
 * be redirected to the SGL variants for multi-segment mbufs; plain GMAC
 * treats the auth region itself as AAD.
 */
1262 if (session->cipher.mode == IMB_CIPHER_GCM) {
1263 job->u.GCM.aad = op->sym->aead.aad.data;
1264 job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
1266 job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
1267 job->cipher_mode = IMB_CIPHER_GCM_SGL;
1268 job->hash_alg = IMB_AUTH_GCM_SGL;
1272 job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
1273 uint8_t *, op->sym->auth.data.offset);
1274 job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
1275 job->cipher_mode = IMB_CIPHER_GCM;
1277 job->enc_keys = &session->cipher.gcm_key;
1278 job->dec_keys = &session->cipher.gcm_key;
/* Wireless algos carry a separate auth IV inside the op private area */
1280 case IMB_AUTH_ZUC_EIA3_BITLEN:
1281 case IMB_AUTH_ZUC256_EIA3_BITLEN:
1282 job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
1283 job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1284 session->auth_iv.offset);
1286 case IMB_AUTH_SNOW3G_UIA2_BITLEN:
1287 job->u.SNOW3G_UIA2._key = (void *)
1288 &session->auth.pKeySched_snow3g_auth;
1289 job->u.SNOW3G_UIA2._iv =
1290 rte_crypto_op_ctod_offset(op, uint8_t *,
1291 session->auth_iv.offset);
1293 case IMB_AUTH_KASUMI_UIA1:
1294 job->u.KASUMI_UIA1._key = (void *)
1295 &session->auth.pKeySched_kasumi_auth;
1297 case IMB_AUTH_CHACHA20_POLY1305:
1298 job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
1299 job->u.CHACHA20_POLY1305.aad_len_in_bytes =
1300 session->aead.aad_len;
/* Multi-segment mbuf: switch to the SGL variant with per-qp context */
1302 job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx;
1303 job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL;
1304 job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL;
1306 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1307 job->dec_keys = session->cipher.expanded_aes_keys.encode;
/* Default: HMAC with the ipad/opad partial hashes precomputed at
 * session setup (see calculate_auth_precomputes()).
 */
1310 job->u.HMAC._hashed_auth_key_xor_ipad =
1311 session->auth.pads.inner;
1312 job->u.HMAC._hashed_auth_key_xor_opad =
1313 session->auth.pads.outer;
/* Data offset into the mbuf: AEAD ops use the aead offset, others the
 * cipher offset.
 */
1318 m_offset = op->sym->aead.data.offset;
1320 m_offset = op->sym->cipher.data.offset;
/* Wireless cipher algos override the cipher key schedule pointers */
1322 if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
1323 job->enc_keys = session->cipher.zuc_cipher_key;
1324 job->dec_keys = session->cipher.zuc_cipher_key;
1326 } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
1327 job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
1329 } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
1330 job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
/* Select in-place vs out-of-place destination */
1334 if (!op->sym->m_dst) {
1335 /* in-place operation */
1338 } else if (op->sym->m_dst == op->sym->m_src) {
1339 /* in-place operation */
1343 /* out-of-place operation */
1344 m_dst = op->sym->m_dst;
/* For verify ops the tag is always generated into a per-qp scratch
 * buffer so it can be compared with the received digest at dequeue;
 * the same scratch is used when the generated digest is longer than
 * the requested one (truncation happens in generate_digest()).
 */
1348 /* Set digest output location */
1349 if (job->hash_alg != IMB_AUTH_NULL &&
1350 session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1351 job->auth_tag_output = qp_data->temp_digests[*digest_idx];
1352 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
1355 job->auth_tag_output = op->sym->aead.digest.data;
1357 job->auth_tag_output = op->sym->auth.digest.data;
1359 if (session->auth.req_digest_len !=
1360 session->auth.gen_digest_len) {
1361 job->auth_tag_output =
1362 qp_data->temp_digests[*digest_idx];
1363 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
1367 * Multi-buffer library currently only supports returning a truncated
1368 * digest length as specified in the relevant IPsec RFCs
1371 /* Set digest length */
1372 job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1374 /* Set IV parameters */
1375 job->iv_len_in_bytes = session->iv.length;
1377 /* Data Parameters */
1382 job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1383 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
/* Per-algorithm hash region and IV: units differ (bytes vs bits) */
1386 switch (job->hash_alg) {
1387 case IMB_AUTH_AES_CCM:
1388 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
1389 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
/* CCM: first IV byte is skipped (counter block flags live there) */
1391 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1392 session->iv.offset + 1);
1395 case IMB_AUTH_AES_GMAC:
1396 if (session->cipher.mode == IMB_CIPHER_GCM) {
1397 job->hash_start_src_offset_in_bytes =
1398 op->sym->aead.data.offset;
1399 job->msg_len_to_hash_in_bytes =
1400 op->sym->aead.data.length;
1401 } else { /* AES-GMAC only, only AAD used */
1402 job->msg_len_to_hash_in_bytes = 0;
1403 job->hash_start_src_offset_in_bytes = 0;
1406 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1407 session->iv.offset);
/* SGL variants: real lengths are fed segment by segment below */
1410 case IMB_AUTH_GCM_SGL:
1411 case IMB_AUTH_CHACHA20_POLY1305_SGL:
1412 job->hash_start_src_offset_in_bytes = 0;
1413 job->msg_len_to_hash_in_bytes = 0;
1414 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1415 session->iv.offset);
1418 case IMB_AUTH_CHACHA20_POLY1305:
1419 job->hash_start_src_offset_in_bytes =
1420 op->sym->aead.data.offset;
1421 job->msg_len_to_hash_in_bytes =
1422 op->sym->aead.data.length;
1423 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1424 session->iv.offset);
1426 /* ZUC and SNOW3G require length in bits and offset in bytes */
1427 case IMB_AUTH_ZUC_EIA3_BITLEN:
1428 case IMB_AUTH_ZUC256_EIA3_BITLEN:
1429 case IMB_AUTH_SNOW3G_UIA2_BITLEN:
/* op offsets/lengths are in bits here; convert to bytes for
 * auth_start_offset(), keep the bit length for the job.
 */
1430 auth_off_in_bytes = op->sym->auth.data.offset >> 3;
1431 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
1432 auth_len_in_bytes = op->sym->auth.data.length >> 3;
1433 ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
1435 job->hash_start_src_offset_in_bytes = auth_start_offset(op,
1436 session, oop, auth_off_in_bytes,
1437 ciph_off_in_bytes, auth_len_in_bytes,
1439 job->msg_len_to_hash_in_bits = op->sym->auth.data.length;
1441 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1442 session->iv.offset);
1445 /* KASUMI requires lengths and offset in bytes */
1446 case IMB_AUTH_KASUMI_UIA1:
1447 auth_off_in_bytes = op->sym->auth.data.offset >> 3;
1448 ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
1449 auth_len_in_bytes = op->sym->auth.data.length >> 3;
1450 ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
1452 job->hash_start_src_offset_in_bytes = auth_start_offset(op,
1453 session, oop, auth_off_in_bytes,
1454 ciph_off_in_bytes, auth_len_in_bytes,
1456 job->msg_len_to_hash_in_bytes = auth_len_in_bytes;
1458 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1459 session->iv.offset);
/* Default: byte-oriented offsets/lengths straight from the op */
1463 job->hash_start_src_offset_in_bytes = auth_start_offset(op,
1464 session, oop, op->sym->auth.data.offset,
1465 op->sym->cipher.data.offset,
1466 op->sym->auth.data.length,
1467 op->sym->cipher.data.length);
1468 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
1470 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1471 session->iv.offset);
/* Per-algorithm cipher region: units again differ per algo */
1474 switch (job->cipher_mode) {
1475 /* ZUC requires length and offset in bytes */
1476 case IMB_CIPHER_ZUC_EEA3:
1477 job->cipher_start_src_offset_in_bytes =
1478 op->sym->cipher.data.offset >> 3;
1479 job->msg_len_to_cipher_in_bytes =
1480 op->sym->cipher.data.length >> 3;
1482 /* ZUC and SNOW3G require length and offset in bits */
1483 case IMB_CIPHER_SNOW3G_UEA2_BITLEN:
1484 case IMB_CIPHER_KASUMI_UEA1_BITLEN:
1485 job->cipher_start_src_offset_in_bits =
1486 op->sym->cipher.data.offset;
1487 job->msg_len_to_cipher_in_bits =
1488 op->sym->cipher.data.length;
1490 case IMB_CIPHER_GCM:
1491 if (session->cipher.mode == IMB_CIPHER_NULL) {
1492 /* AES-GMAC only (only AAD used) */
1493 job->msg_len_to_cipher_in_bytes = 0;
1494 job->cipher_start_src_offset_in_bytes = 0;
1496 job->cipher_start_src_offset_in_bytes =
1497 op->sym->aead.data.offset;
1498 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
1501 case IMB_CIPHER_CCM:
1502 case IMB_CIPHER_CHACHA20_POLY1305:
1503 job->cipher_start_src_offset_in_bytes =
1504 op->sym->aead.data.offset;
1505 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
/* SGL variants: lengths are supplied per segment below */
1507 case IMB_CIPHER_GCM_SGL:
1508 case IMB_CIPHER_CHACHA20_POLY1305_SGL:
1509 job->msg_len_to_cipher_in_bytes = 0;
1510 job->cipher_start_src_offset_in_bytes = 0;
1513 job->cipher_start_src_offset_in_bytes =
1514 op->sym->cipher.data.offset;
1515 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
/* NULL cipher out-of-place: the MB library does not copy, do it here */
1518 if (job->cipher_mode == IMB_CIPHER_NULL && oop) {
1519 memcpy(job->dst + job->cipher_start_src_offset_in_bytes,
1520 job->src + job->cipher_start_src_offset_in_bytes,
1521 job->msg_len_to_cipher_in_bytes);
1524 /* Set user data to be crypto operation data struct */
1525 job->user_data = op;
/* AEAD SGL path: submit an INIT-state job first, then walk the source
 * (and, when out-of-place, destination) segment chains and feed each
 * segment through handle_aead_sgl_job() until the state machine reports
 * IMB_SGL_COMPLETE.
 */
1529 job->sgl_state = IMB_SGL_INIT;
1530 job = IMB_SUBMIT_JOB(mb_mgr);
1531 total_len = op->sym->aead.data.length;
1534 src_sgl.offset = m_offset;
1536 while (src_sgl.offset >= src_sgl.m->data_len) {
1537 src_sgl.offset -= src_sgl.m->data_len;
1538 src_sgl.m = src_sgl.m->next;
1540 RTE_ASSERT(src_sgl.m != NULL);
1545 dst_sgl.offset = m_offset;
1547 while (dst_sgl.offset >= dst_sgl.m->data_len) {
1548 dst_sgl.offset -= dst_sgl.m->data_len;
1549 dst_sgl.m = dst_sgl.m->next;
1551 RTE_ASSERT(dst_sgl.m != NULL);
1555 while (job->sgl_state != IMB_SGL_COMPLETE) {
1556 job = IMB_GET_NEXT_JOB(mb_mgr);
1558 ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
1559 &src_sgl, &dst_sgl);
1568 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1570 * Process a crypto operation containing a security op and complete an
1571 * IMB_JOB job structure for submission to the multi buffer library for
1575 set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
1576 struct rte_crypto_op *op, uint8_t *digest_idx)
1578 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
1579 struct rte_mbuf *m_src, *m_dst;
1580 struct rte_crypto_sym_op *sym;
1581 struct aesni_mb_session *session = NULL;
/* Security ops must carry a security session, not a sym session */
1583 if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
1584 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1587 session = (struct aesni_mb_session *)
1588 get_sec_session_private_data(op->sym->sec_session);
1590 if (unlikely(session == NULL)) {
1591 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1594 /* Only DOCSIS protocol operations supported now */
1595 if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
1596 session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
1597 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* DOCSIS path only supports in-place operation */
1604 if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
1605 /* in-place operation */
1608 /* out-of-place operation not supported */
1609 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1613 /* Set crypto operation */
1614 job->chain_order = session->chain_order;
1616 /* Set cipher parameters */
1617 job->cipher_direction = session->cipher.direction;
1618 job->cipher_mode = session->cipher.mode;
1620 job->key_len_in_bytes = session->cipher.key_length_in_bytes;
1621 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1622 job->dec_keys = session->cipher.expanded_aes_keys.decode;
/* IV is stored inline in the op private area at session->iv.offset */
1624 /* Set IV parameters */
1625 job->iv_len_in_bytes = session->iv.length;
1626 job->iv = (uint8_t *)op + session->iv.offset;
1628 /* Set authentication parameters */
1629 job->hash_alg = session->auth.algo;
/* CRC32 tag always goes to per-qp scratch; it is compared against the
 * in-packet CRC at dequeue by verify_docsis_sec_crc().
 */
1631 /* Set digest output location */
1632 job->auth_tag_output = qp_data->temp_digests[*digest_idx];
1633 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
1635 /* Set digest length */
1636 job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1638 /* Set data parameters */
1639 job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1640 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
1641 sym->cipher.data.offset);
1643 job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
1644 job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;
1646 job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
1647 job->msg_len_to_hash_in_bytes = sym->auth.data.length;
/* Link the op to the job so dequeue can recover it */
1649 job->user_data = op;
1655 verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status)
1657 uint16_t crc_offset;
1660 if (!job->msg_len_to_hash_in_bytes)
1663 crc_offset = job->hash_start_src_offset_in_bytes +
1664 job->msg_len_to_hash_in_bytes -
1665 job->cipher_start_src_offset_in_bytes;
1666 crc = job->dst + crc_offset;
1668 /* Verify CRC (at the end of the message) */
1669 if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
1670 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1675 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status)
1677 /* Verify digest if required */
1678 if (memcmp(job->auth_tag_output, digest, len) != 0)
1679 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1683 generate_digest(IMB_JOB *job, struct rte_crypto_op *op,
1684 struct aesni_mb_session *sess)
1686 /* No extra copy needed */
1687 if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
1691 * This can only happen for HMAC, so only digest
1692 * for authentication algos is required
1694 memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1695 sess->auth.req_digest_len);
1699 * Process a completed job and return the crypto operation the job processed
1701 * @param qp Queue Pair to process
1702 * @param job IMB_JOB job to process
1705 * - Returns processed crypto operation.
1706 * - Returns NULL on invalid job
1708 static inline struct rte_crypto_op *
1709 post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
1711 struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
1712 struct aesni_mb_session *sess = NULL;
/* driver_id is needed to look up the sym session private data */
1713 uint32_t driver_id = ipsec_mb_get_driver_id(
1714 IPSEC_MB_PMD_TYPE_AESNI_MB);
1716 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1717 uint8_t is_docsis_sec = 0;
1719 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1721 * Assuming at this point that if it's a security type op, that
1722 * this is for DOCSIS
1725 sess = get_sec_session_private_data(op->sym->sec_session);
1729 sess = get_sym_session_private_data(op->sym->session,
1733 if (unlikely(sess == NULL)) {
1734 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
/* Only translate the job status if nothing failed earlier at enqueue */
1738 if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
1739 switch (job->status) {
1740 case IMB_STATUS_COMPLETED:
1741 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1743 if (job->hash_alg == IMB_AUTH_NULL)
/* Verify ops: compare the tag generated into qp scratch against the
 * digest supplied in the op (or the in-packet CRC for DOCSIS).
 */
1746 if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1747 if (is_aead_algo(job->hash_alg,
1750 op->sym->aead.digest.data,
1751 sess->auth.req_digest_len,
1753 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1754 else if (is_docsis_sec)
1755 verify_docsis_sec_crc(job,
1760 op->sym->auth.digest.data,
1761 sess->auth.req_digest_len,
/* Generate ops: copy out a truncated digest if required */
1764 generate_digest(job, op, sess);
1767 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1771 /* Free session if a session-less crypto op */
1772 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* Scrub private session data before returning it to the pools */
1773 memset(sess, 0, sizeof(struct aesni_mb_session));
1774 memset(op->sym->session, 0,
1775 rte_cryptodev_sym_get_existing_header_session_size(
1777 rte_mempool_put(qp->sess_mp_priv, sess);
1778 rte_mempool_put(qp->sess_mp, op->sym->session);
1779 op->sym->session = NULL;
1786 post_process_mb_sync_job(IMB_JOB *job)
1790 st = job->user_data;
1791 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG;
1795 * Process a completed IMB_JOB job and keep processing jobs until
1796 * get_completed_job return NULL
1798 * @param qp Queue Pair to process
1799 * @param mb_mgr IMB_MGR to use
1800 * @param job IMB_JOB job
1801 * @param ops crypto ops to fill
1802 * @param nb_ops number of crypto ops
1805 * - Number of processed jobs
1808 handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
1809 IMB_JOB *job, struct rte_crypto_op **ops,
1812 struct rte_crypto_op *op = NULL;
1813 uint16_t processed_jobs = 0;
/* Drain the chain of completed jobs, post-processing each into a crypto
 * op, until the MB manager has no more or the ops array is full.
 */
1815 while (job != NULL) {
1816 op = post_process_mb_job(qp, job);
1819 ops[processed_jobs++] = op;
1820 qp->stats.dequeued_count++;
/* post_process_mb_job() returned NULL: count a dequeue error */
1822 qp->stats.dequeue_err_count++;
1825 if (processed_jobs == nb_ops)
1828 job = IMB_GET_COMPLETED_JOB(mb_mgr);
1831 return processed_jobs;
1834 static inline uint32_t
1835 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
1839 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
1840 post_process_mb_sync_job(job);
1845 static inline uint32_t
1846 flush_mb_sync_mgr(IMB_MGR *mb_mgr)
1850 job = IMB_FLUSH_JOB(mb_mgr);
1851 return handle_completed_sync_jobs(job, mb_mgr);
1854 static inline uint16_t
1855 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
1856 struct rte_crypto_op **ops, uint16_t nb_ops)
1858 int processed_ops = 0;
1860 /* Flush the remaining jobs */
1861 IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
1864 processed_ops += handle_completed_jobs(qp, mb_mgr, job,
1865 &ops[processed_ops], nb_ops - processed_ops);
1867 return processed_ops;
1870 static inline IMB_JOB *
1871 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
1873 job->chain_order = IMB_ORDER_HASH_CIPHER;
1874 job->cipher_mode = IMB_CIPHER_NULL;
1875 job->hash_alg = IMB_AUTH_NULL;
1876 job->cipher_direction = IMB_DIR_DECRYPT;
1878 /* Set user data to be crypto operation data struct */
1879 job->user_data = op;
1885 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
1888 struct ipsec_mb_qp *qp = queue_pair;
1889 IMB_MGR *mb_mgr = qp->mb_mgr;
1890 struct rte_crypto_op *op;
1892 int retval, processed_jobs = 0;
1894 if (unlikely(nb_ops == 0 || mb_mgr == NULL))
/* Work on a local copy of the digest index; written back at the end */
1897 uint8_t digest_idx = qp->digest_idx;
1900 /* Get next free mb job struct from mb manager */
1901 job = IMB_GET_NEXT_JOB(mb_mgr);
1902 if (unlikely(job == NULL)) {
1903 /* if no free mb job structs we need to flush mb_mgr */
1904 processed_jobs += flush_mb_mgr(qp, mb_mgr,
1905 &ops[processed_jobs],
1906 nb_ops - processed_jobs);
1908 if (nb_ops == processed_jobs)
/* Flushing freed job structs; retry the allocation */
1911 job = IMB_GET_NEXT_JOB(mb_mgr);
1915 * Get next operation to process from ingress queue.
1916 * There is no need to return the job to the IMB_MGR
1917 * if there are no more operations to process, since the IMB_MGR
1918 * can use that pointer again in next get_next calls.
1920 retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
/* Fill the job from the op: security (DOCSIS) ops take the dedicated
 * path, everything else the generic one.
 */
1924 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1925 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1926 retval = set_sec_mb_job_params(job, qp, op,
1930 retval = set_mb_job_params(job, qp, op,
1931 &digest_idx, mb_mgr);
/* On setup failure neutralize the job so it still round-trips the
 * manager and the op is returned with its error status.
 */
1933 if (unlikely(retval != 0)) {
1934 qp->stats.dequeue_err_count++;
1935 set_job_null_op(job, op);
1938 /* Submit job to multi-buffer for processing */
1939 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1940 job = IMB_SUBMIT_JOB(mb_mgr);
1942 job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
1945 * If submit returns a processed job then handle it,
1946 * before submitting subsequent jobs
1949 processed_jobs += handle_completed_jobs(qp, mb_mgr,
1950 job, &ops[processed_jobs],
1951 nb_ops - processed_jobs);
1953 } while (processed_jobs < nb_ops);
/* Persist the advanced digest index for the next burst */
1955 qp->digest_idx = digest_idx;
/* If nothing completed in-line, flush once to make forward progress */
1957 if (processed_jobs < 1)
1958 processed_jobs += flush_mb_mgr(qp, mb_mgr,
1959 &ops[processed_jobs],
1960 nb_ops - processed_jobs);
1962 return processed_jobs;
1967 ipsec_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
1971 for (i = 0; i != vec->num; ++i)
1972 vec->status[i] = err;
1976 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
1978 /* no multi-seg support with current AESNI-MB PMD */
1981 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
1986 static inline IMB_JOB *
1987 submit_sync_job(IMB_MGR *mb_mgr)
1989 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1990 return IMB_SUBMIT_JOB(mb_mgr);
1992 return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
1996 static inline uint32_t
1997 generate_sync_dgst(struct rte_crypto_sym_vec *vec,
1998 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
2002 for (i = 0, k = 0; i != vec->num; i++) {
2003 if (vec->status[i] == 0) {
2004 memcpy(vec->digest[i].va, dgst[i], len);
2012 static inline uint32_t
2013 verify_sync_dgst(struct rte_crypto_sym_vec *vec,
2014 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
2018 for (i = 0, k = 0; i != vec->num; i++) {
2019 if (vec->status[i] == 0) {
2020 if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
2021 vec->status[i] = EBADMSG;
2031 aesni_mb_process_bulk(struct rte_cryptodev *dev,
2032 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
2033 struct rte_crypto_sym_vec *vec)
2036 uint32_t i, j, k, len;
2040 struct aesni_mb_session *s;
/* Per-op scratch digests; VLA sized by the burst length vec->num */
2041 uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
2043 s = get_sym_session_private_data(sess, dev->driver_id);
/* No session: mark every element EINVAL and bail out */
2045 ipsec_mb_fill_error_code(vec, EINVAL);
2049 /* get per-thread MB MGR, create one if needed */
2050 mb_mgr = get_per_thread_mb_mgr();
2051 if (unlikely(mb_mgr == NULL))
/* i walks the vector, j counts submitted jobs, k completed ones */
2054 for (i = 0, j = 0, k = 0; i != vec->num; i++) {
2055 ret = check_crypto_sgl(sofs, vec->src_sgl + i);
/* Unsupported layout: record the error and skip this element */
2057 vec->status[i] = ret;
2061 buf = vec->src_sgl[i].vec[0].base;
2062 len = vec->src_sgl[i].vec[0].len;
2064 job = IMB_GET_NEXT_JOB(mb_mgr);
/* Out of job structs: flush completed jobs to free some, retry */
2066 k += flush_mb_sync_mgr(mb_mgr);
2067 job = IMB_GET_NEXT_JOB(mb_mgr);
2068 RTE_ASSERT(job != NULL);
2071 /* Submit job for processing */
2072 set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
2073 &vec->aad[i], tmp_dgst[i], &vec->status[i]);
2074 job = submit_sync_job(mb_mgr);
2077 /* handle completed jobs */
2078 k += handle_completed_sync_jobs(job, mb_mgr);
2081 /* flush remaining jobs */
2083 k += flush_mb_sync_mgr(mb_mgr);
2085 /* finish processing for successful jobs: check/update digest */
2087 if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
2088 k = verify_sync_dgst(vec,
2089 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
2090 s->auth.req_digest_len);
2092 k = generate_sync_dgst(vec,
2093 (const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
2094 s->auth.req_digest_len);
/* Crypto device operations for the AESNI-MB PMD; the generic entry points
 * are provided by the shared ipsec_mb framework, only the synchronous CPU
 * crypto path is PMD-specific.
 */
2100 struct rte_cryptodev_ops aesni_mb_pmd_ops = {
2101 .dev_configure = ipsec_mb_config,
2102 .dev_start = ipsec_mb_start,
2103 .dev_stop = ipsec_mb_stop,
2104 .dev_close = ipsec_mb_close,
/* Statistics */
2106 .stats_get = ipsec_mb_stats_get,
2107 .stats_reset = ipsec_mb_stats_reset,
2109 .dev_infos_get = ipsec_mb_info_get,
/* Queue pair management */
2111 .queue_pair_setup = ipsec_mb_qp_setup,
2112 .queue_pair_release = ipsec_mb_qp_release,
/* Synchronous CPU crypto (see aesni_mb_process_bulk above) */
2114 .sym_cpu_process = aesni_mb_process_bulk,
/* Session management */
2116 .sym_session_get_size = ipsec_mb_sym_session_get_size,
2117 .sym_session_configure = ipsec_mb_sym_session_configure,
2118 .sym_session_clear = ipsec_mb_sym_session_clear
2121 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2123 * Configure an AESNI multi-buffer session from a security session
2127 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
2128 struct rte_security_session *sess,
2129 struct rte_mempool *mempool)
2131 void *sess_private_data;
2132 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
/* Only lookaside-protocol DOCSIS sessions are supported */
2135 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2136 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2137 IPSEC_MB_LOG(ERR, "Invalid security protocol");
/* Allocate private session storage from the caller's mempool */
2141 if (rte_mempool_get(mempool, &sess_private_data)) {
2142 IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
2146 ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
2150 IPSEC_MB_LOG(ERR, "Failed to configure session parameters");
2152 /* Return session to mempool */
2153 rte_mempool_put(mempool, sess_private_data);
/* Attach the configured private data to the security session */
2157 set_sec_session_private_data(sess, sess_private_data);
2162 /** Clear the memory of session so it does not leave key material behind */
2164 aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
2165 struct rte_security_session *sess)
2167 void *sess_priv = get_sec_session_private_data(sess);
2170 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2172 memset(sess_priv, 0, sizeof(struct aesni_mb_session));
2173 set_sec_session_private_data(sess, NULL);
2174 rte_mempool_put(sess_mp, sess_priv);
2179 /** Get security capabilities for aesni multi-buffer */
/* Returns the static DOCSIS capability table; the device argument is
 * unused because capabilities do not vary per device.
 */
2180 static const struct rte_security_capability *
2181 aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
2183 return aesni_mb_pmd_security_cap;
/* Security (DOCSIS) operations table; only create/destroy and the
 * capability query are implemented by this PMD.
 */
2186 static struct rte_security_ops aesni_mb_pmd_sec_ops = {
2187 .session_create = aesni_mb_pmd_sec_sess_create,
2188 .session_update = NULL,
2189 .session_stats_get = NULL,
2190 .session_destroy = aesni_mb_pmd_sec_sess_destroy,
2191 .set_pkt_metadata = NULL,
2192 .capabilities_get = aesni_mb_pmd_sec_capa_get
/* Exported pointer used when wiring up the device's security context */
2195 struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;
2198 aesni_mb_configure_dev(struct rte_cryptodev *dev)
2200 struct rte_security_ctx *security_instance;
2202 security_instance = rte_malloc("aesni_mb_sec",
2203 sizeof(struct rte_security_ctx),
2204 RTE_CACHE_LINE_SIZE);
2205 if (security_instance != NULL) {
2206 security_instance->device = (void *)dev;
2207 security_instance->ops = rte_aesni_mb_pmd_sec_ops;
2208 security_instance->sess_cnt = 0;
2209 dev->security_ctx = security_instance;
/* vdev probe hook: delegate device creation to the shared ipsec_mb
 * framework, identifying this PMD type.
 */
2220 aesni_mb_probe(struct rte_vdev_device *vdev)
2222 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
/* Virtual device driver hooks for the AESNI-MB PMD */
2225 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
2226 .probe = aesni_mb_probe,
2227 .remove = ipsec_mb_remove
2230 static struct cryptodev_driver aesni_mb_crypto_drv;
/* Register the vdev driver, its legacy name alias, the accepted devargs
 * string, and the crypto driver id used to bind sessions to this PMD.
 */
2232 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
2233 cryptodev_aesni_mb_pmd_drv);
2234 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
2235 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
2236 "max_nb_queue_pairs=<int> socket_id=<int>");
2237 RTE_PMD_REGISTER_CRYPTO_DRIVER(
2238 aesni_mb_crypto_drv,
2239 cryptodev_aesni_mb_pmd_drv.driver,
2240 pmd_driver_id_aesni_mb);
2242 /* Constructor function to register aesni-mb PMD */
2243 RTE_INIT(ipsec_mb_register_aesni_mb)
2245 struct ipsec_mb_internals *aesni_mb_data =
2246 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
2248 aesni_mb_data->caps = aesni_mb_capabilities;
2249 aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst;
2250 aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2251 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2252 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
2253 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
2254 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
2255 RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
2257 aesni_mb_data->internals_priv_size = 0;
2258 aesni_mb_data->ops = &aesni_mb_pmd_ops;
2259 aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
2260 aesni_mb_data->queue_pair_configure = NULL;
2261 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2262 aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops;
2263 aesni_mb_data->dev_config = aesni_mb_configure_dev;
2264 aesni_mb_data->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
2266 aesni_mb_data->session_configure = aesni_mb_session_configure;
2267 aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session);