1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2021 Intel Corporation
5 #include "pmd_aesni_mb_priv.h"
8 * Calculate the authentication pre-computes
10 * @param one_block_hash Function pointer
11 * to calculate digest on ipad/opad
12 * @param ipad Inner pad output byte array
13 * @param opad Outer pad output byte array
14 * @param hkey Authentication key
15 * @param hkey_len Authentication key length
16 * @param blocksize Block size of selected hash algo
/*
 * Derive the HMAC inner/outer partial hashes (ipad/opad precomputes)
 * for the given auth key, using the supplied one-block hash function.
 * NOTE(review): this extract elides some lines of the function
 * (the "blocksize" parameter, local declarations and closing braces).
 */
19 calculate_auth_precomputes(hash_one_block_t one_block_hash,
20 uint8_t *ipad, uint8_t *opad,
21 const uint8_t *hkey, uint16_t hkey_len,
/* 16-byte aligned stack scratch buffers for the pad blocks */
26 uint8_t ipad_buf[blocksize] __rte_aligned(16);
27 uint8_t opad_buf[blocksize] __rte_aligned(16);
29 /* Setup inner and outer pads */
30 memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
31 memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
33 /* XOR hash key with inner and outer pads */
/* Key is clamped to one block; longer keys are pre-hashed by caller */
34 length = hkey_len > blocksize ? blocksize : hkey_len;
36 for (i = 0; i < length; i++) {
37 ipad_buf[i] ^= hkey[i];
38 opad_buf[i] ^= hkey[i];
41 /* Compute partial hashes */
42 (*one_block_hash)(ipad_buf, ipad);
43 (*one_block_hash)(opad_buf, opad);
/* Scrub key-derived material from the stack */
46 memset(ipad_buf, 0, blocksize);
47 memset(opad_buf, 0, blocksize);
/* Return non-zero if the hash/cipher pair forms an AEAD algorithm. */
51 is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
53 return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 ||
54 hash_alg == IMB_AUTH_AES_CCM ||
/* GMAC only counts as AEAD when paired with GCM (i.e. AES-GCM) */
55 (hash_alg == IMB_AUTH_AES_GMAC &&
56 cipher_mode == IMB_CIPHER_GCM));
59 /** Set session authentication parameters */
/*
 * Parse an auth xform and fill the session's authentication fields:
 * algorithm, digest lengths, per-algorithm key schedules and, for
 * HMAC variants, the ipad/opad precomputes.
 * NOTE(review): this extract elides lines (returns, breaks, braces).
 */
61 aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
62 struct aesni_mb_session *sess,
63 const struct rte_crypto_sym_xform *xform)
65 hash_one_block_t hash_oneblock_fn = NULL;
66 unsigned int key_larger_block_size = 0;
67 uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
/* Plain SHA algos clear this flag to skip HMAC pad precomputation */
68 uint32_t auth_precompute = 1;
71 sess->auth.algo = IMB_AUTH_NULL;
75 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
76 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth");
80 /* Set IV parameters */
81 sess->auth_iv.offset = xform->auth.iv.offset;
82 sess->auth_iv.length = xform->auth.iv.length;
84 /* Set the request digest size */
85 sess->auth.req_digest_len = xform->auth.digest_length;
87 /* Select auth generate/verify */
88 sess->auth.operation = xform->auth.op;
90 /* Set Authentication Parameters */
/* AES-XCBC: fixed truncated digest, expand K1/K2/K3 subkeys */
91 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
92 sess->auth.algo = IMB_AUTH_AES_XCBC;
94 uint16_t xcbc_mac_digest_len =
95 get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC);
96 if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
97 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
100 sess->auth.gen_digest_len = sess->auth.req_digest_len;
102 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
103 sess->auth.xcbc.k1_expanded,
104 sess->auth.xcbc.k2, sess->auth.xcbc.k3);
/* AES-CMAC: expand key and derive skey1/skey2 subkeys */
108 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
111 sess->auth.algo = IMB_AUTH_AES_CMAC;
113 uint16_t cmac_digest_len =
114 get_digest_byte_length(IMB_AUTH_AES_CMAC);
116 if (sess->auth.req_digest_len > cmac_digest_len) {
117 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
121 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
122 * in version 0.50 and sizes of 12 and 16 bytes,
124 * If size requested is different, generate the full digest
125 * (16 bytes) in a temporary location and then memcpy
126 * the requested number of bytes.
128 if (sess->auth.req_digest_len < 4)
129 sess->auth.gen_digest_len = cmac_digest_len;
131 sess->auth.gen_digest_len = sess->auth.req_digest_len;
133 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
134 sess->auth.cmac.expkey, dust);
135 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
136 sess->auth.cmac.skey1, sess->auth.cmac.skey2);
/* AES-GMAC as plain auth: direction drives the chain order */
140 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
141 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
142 sess->cipher.direction = IMB_DIR_ENCRYPT;
143 sess->chain_order = IMB_ORDER_CIPHER_HASH;
145 sess->cipher.direction = IMB_DIR_DECRYPT;
147 sess->auth.algo = IMB_AUTH_AES_GMAC;
148 if (sess->auth.req_digest_len >
149 get_digest_byte_length(IMB_AUTH_AES_GMAC)) {
150 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
153 sess->auth.gen_digest_len = sess->auth.req_digest_len;
/* GMAC uses the auth IV as the cipher IV for the GCM engine */
154 sess->iv.length = xform->auth.iv.length;
155 sess->iv.offset = xform->auth.iv.offset;
157 switch (xform->auth.key.length) {
158 case IMB_KEY_128_BYTES:
159 IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
160 &sess->cipher.gcm_key);
161 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
163 case IMB_KEY_192_BYTES:
164 IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
165 &sess->cipher.gcm_key);
166 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
168 case IMB_KEY_256_BYTES:
169 IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
170 &sess->cipher.gcm_key);
171 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
174 IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
/* Wireless MAC algos: fixed truncated digests, raw/scheduled keys */
181 if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
182 sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
183 uint16_t zuc_eia3_digest_len =
184 get_truncated_digest_byte_length(
185 IMB_AUTH_ZUC_EIA3_BITLEN);
186 if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
187 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
190 sess->auth.gen_digest_len = sess->auth.req_digest_len;
192 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
194 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
195 sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
196 uint16_t snow3g_uia2_digest_len =
197 get_truncated_digest_byte_length(
198 IMB_AUTH_SNOW3G_UIA2_BITLEN);
199 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
200 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
203 sess->auth.gen_digest_len = sess->auth.req_digest_len;
205 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
206 &sess->auth.pKeySched_snow3g_auth);
208 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
209 sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
210 uint16_t kasumi_f9_digest_len =
211 get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
212 if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
213 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
216 sess->auth.gen_digest_len = sess->auth.req_digest_len;
218 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
219 &sess->auth.pKeySched_kasumi_auth);
/*
 * HMAC family / plain SHA: keys longer than the hash block size are
 * pre-hashed into hashed_key and key_larger_block_size is set so the
 * precompute step below uses the hashed key instead of the raw key.
 */
223 switch (xform->auth.algo) {
224 case RTE_CRYPTO_AUTH_MD5_HMAC:
225 sess->auth.algo = IMB_AUTH_MD5;
226 hash_oneblock_fn = mb_mgr->md5_one_block;
228 case RTE_CRYPTO_AUTH_SHA1_HMAC:
229 sess->auth.algo = IMB_AUTH_HMAC_SHA_1;
230 hash_oneblock_fn = mb_mgr->sha1_one_block;
231 if (xform->auth.key.length > get_auth_algo_blocksize(
232 IMB_AUTH_HMAC_SHA_1)) {
234 xform->auth.key.data,
235 xform->auth.key.length,
237 key_larger_block_size = 1;
240 case RTE_CRYPTO_AUTH_SHA1:
241 sess->auth.algo = IMB_AUTH_SHA_1;
244 case RTE_CRYPTO_AUTH_SHA224_HMAC:
245 sess->auth.algo = IMB_AUTH_HMAC_SHA_224;
246 hash_oneblock_fn = mb_mgr->sha224_one_block;
247 if (xform->auth.key.length > get_auth_algo_blocksize(
248 IMB_AUTH_HMAC_SHA_224)) {
250 xform->auth.key.data,
251 xform->auth.key.length,
253 key_larger_block_size = 1;
256 case RTE_CRYPTO_AUTH_SHA224:
257 sess->auth.algo = IMB_AUTH_SHA_224;
260 case RTE_CRYPTO_AUTH_SHA256_HMAC:
261 sess->auth.algo = IMB_AUTH_HMAC_SHA_256;
262 hash_oneblock_fn = mb_mgr->sha256_one_block;
263 if (xform->auth.key.length > get_auth_algo_blocksize(
264 IMB_AUTH_HMAC_SHA_256)) {
266 xform->auth.key.data,
267 xform->auth.key.length,
269 key_larger_block_size = 1;
272 case RTE_CRYPTO_AUTH_SHA256:
273 sess->auth.algo = IMB_AUTH_SHA_256;
276 case RTE_CRYPTO_AUTH_SHA384_HMAC:
277 sess->auth.algo = IMB_AUTH_HMAC_SHA_384;
278 hash_oneblock_fn = mb_mgr->sha384_one_block;
279 if (xform->auth.key.length > get_auth_algo_blocksize(
280 IMB_AUTH_HMAC_SHA_384)) {
282 xform->auth.key.data,
283 xform->auth.key.length,
285 key_larger_block_size = 1;
288 case RTE_CRYPTO_AUTH_SHA384:
289 sess->auth.algo = IMB_AUTH_SHA_384;
292 case RTE_CRYPTO_AUTH_SHA512_HMAC:
293 sess->auth.algo = IMB_AUTH_HMAC_SHA_512;
294 hash_oneblock_fn = mb_mgr->sha512_one_block;
295 if (xform->auth.key.length > get_auth_algo_blocksize(
296 IMB_AUTH_HMAC_SHA_512)) {
298 xform->auth.key.data,
299 xform->auth.key.length,
301 key_larger_block_size = 1;
304 case RTE_CRYPTO_AUTH_SHA512:
305 sess->auth.algo = IMB_AUTH_SHA_512;
310 "Unsupported authentication algorithm selection");
/* Non-standard digest sizes: generate full digest, copy req bytes */
313 uint16_t trunc_digest_size =
314 get_truncated_digest_byte_length(sess->auth.algo);
315 uint16_t full_digest_size =
316 get_digest_byte_length(sess->auth.algo);
318 if (sess->auth.req_digest_len > full_digest_size ||
319 sess->auth.req_digest_len == 0) {
320 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
324 if (sess->auth.req_digest_len != trunc_digest_size &&
325 sess->auth.req_digest_len != full_digest_size)
326 sess->auth.gen_digest_len = full_digest_size;
328 sess->auth.gen_digest_len = sess->auth.req_digest_len;
330 /* Plain SHA does not require precompute key */
331 if (auth_precompute == 0)
334 /* Calculate Authentication precomputes */
/* Use pre-hashed key when the raw key exceeded the block size */
335 if (key_larger_block_size) {
336 calculate_auth_precomputes(hash_oneblock_fn,
337 sess->auth.pads.inner, sess->auth.pads.outer,
339 xform->auth.key.length,
340 get_auth_algo_blocksize(sess->auth.algo));
342 calculate_auth_precomputes(hash_oneblock_fn,
343 sess->auth.pads.inner, sess->auth.pads.outer,
344 xform->auth.key.data,
345 xform->auth.key.length,
346 get_auth_algo_blocksize(sess->auth.algo));
352 /** Set session cipher parameters */
/*
 * Parse a cipher xform and fill the session's cipher fields:
 * direction, mode, IV parameters and the per-algorithm key schedule.
 * NOTE(review): this extract elides lines (breaks, braces, and the
 * is_aes/is_3DES flag assignments in the mode switch).
 */
354 aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr,
355 struct aesni_mb_session *sess,
356 const struct rte_crypto_sym_xform *xform)
/* Flags selecting which key-expansion path runs below */
360 uint8_t is_docsis = 0;
362 uint8_t is_snow3g = 0;
363 uint8_t is_kasumi = 0;
366 sess->cipher.mode = IMB_CIPHER_NULL;
370 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
371 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher");
375 /* Select cipher direction */
376 switch (xform->cipher.op) {
377 case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
378 sess->cipher.direction = IMB_DIR_ENCRYPT;
380 case RTE_CRYPTO_CIPHER_OP_DECRYPT:
381 sess->cipher.direction = IMB_DIR_DECRYPT;
384 IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter");
388 /* Select cipher mode */
389 switch (xform->cipher.algo) {
390 case RTE_CRYPTO_CIPHER_AES_CBC:
391 sess->cipher.mode = IMB_CIPHER_CBC;
394 case RTE_CRYPTO_CIPHER_AES_CTR:
395 sess->cipher.mode = IMB_CIPHER_CNTR;
398 case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
399 sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
402 case RTE_CRYPTO_CIPHER_DES_CBC:
403 sess->cipher.mode = IMB_CIPHER_DES;
405 case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
406 sess->cipher.mode = IMB_CIPHER_DOCSIS_DES;
408 case RTE_CRYPTO_CIPHER_3DES_CBC:
409 sess->cipher.mode = IMB_CIPHER_DES3;
412 case RTE_CRYPTO_CIPHER_AES_ECB:
413 sess->cipher.mode = IMB_CIPHER_ECB;
416 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
417 sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
420 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
421 sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
424 case RTE_CRYPTO_CIPHER_KASUMI_F8:
425 sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
429 IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter");
433 /* Set IV parameters */
434 sess->iv.offset = xform->cipher.iv.offset;
435 sess->iv.length = xform->cipher.iv.length;
437 /* Check key length and choose key expansion function for AES */
439 switch (xform->cipher.key.length) {
440 case IMB_KEY_128_BYTES:
441 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
442 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
443 sess->cipher.expanded_aes_keys.encode,
444 sess->cipher.expanded_aes_keys.decode);
446 case IMB_KEY_192_BYTES:
447 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
448 IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
449 sess->cipher.expanded_aes_keys.encode,
450 sess->cipher.expanded_aes_keys.decode);
452 case IMB_KEY_256_BYTES:
453 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
454 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
455 sess->cipher.expanded_aes_keys.encode,
456 sess->cipher.expanded_aes_keys.decode);
459 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
/* DOCSIS BPI supports only 128- and 256-bit AES keys */
462 } else if (is_docsis) {
463 switch (xform->cipher.key.length) {
464 case IMB_KEY_128_BYTES:
465 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
466 IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
467 sess->cipher.expanded_aes_keys.encode,
468 sess->cipher.expanded_aes_keys.decode);
470 case IMB_KEY_256_BYTES:
471 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
472 IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
473 sess->cipher.expanded_aes_keys.encode,
474 sess->cipher.expanded_aes_keys.decode);
477 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
/* 3DES: schedule up to three DES keys, replicate for 16/8B keys */
480 } else if (is_3DES) {
481 uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
482 sess->cipher.exp_3des_keys.key[1],
483 sess->cipher.exp_3des_keys.key[2]};
485 switch (xform->cipher.key.length) {
487 IMB_DES_KEYSCHED(mb_mgr, keys[0],
488 xform->cipher.key.data);
489 IMB_DES_KEYSCHED(mb_mgr, keys[1],
490 xform->cipher.key.data + 8);
491 IMB_DES_KEYSCHED(mb_mgr, keys[2],
492 xform->cipher.key.data + 16);
494 /* Initialize keys - 24 bytes: [K1-K2-K3] */
495 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
496 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
497 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
500 IMB_DES_KEYSCHED(mb_mgr, keys[0],
501 xform->cipher.key.data);
502 IMB_DES_KEYSCHED(mb_mgr, keys[1],
503 xform->cipher.key.data + 8);
504 /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
505 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
506 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
507 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
510 IMB_DES_KEYSCHED(mb_mgr, keys[0],
511 xform->cipher.key.data);
513 /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
514 sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
515 sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
516 sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
519 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
/* IPsec-MB always operates 3DES with a 24-byte key length */
523 sess->cipher.key_length_in_bytes = 24;
/* Wireless ciphers: fixed 16-byte keys */
525 if (xform->cipher.key.length != 16) {
526 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
529 sess->cipher.key_length_in_bytes = 16;
530 memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
532 } else if (is_snow3g) {
533 if (xform->cipher.key.length != 16) {
534 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
537 sess->cipher.key_length_in_bytes = 16;
538 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
539 &sess->cipher.pKeySched_snow3g_cipher);
540 } else if (is_kasumi) {
541 if (xform->cipher.key.length != 16) {
542 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
545 sess->cipher.key_length_in_bytes = 16;
546 IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
547 &sess->cipher.pKeySched_kasumi_cipher);
/* Single DES: one 8-byte key, same schedule for both directions */
549 if (xform->cipher.key.length != 8) {
550 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
553 sess->cipher.key_length_in_bytes = 8;
555 IMB_DES_KEYSCHED(mb_mgr,
556 (uint64_t *)sess->cipher.expanded_aes_keys.encode,
557 xform->cipher.key.data);
558 IMB_DES_KEYSCHED(mb_mgr,
559 (uint64_t *)sess->cipher.expanded_aes_keys.decode,
560 xform->cipher.key.data);
/*
 * Parse an AEAD xform (CCM / GCM / CHACHA20-POLY1305) and fill both
 * the cipher and auth fields of the session, validating key and
 * digest lengths per algorithm.
 * NOTE(review): this extract elides lines (returns, breaks, braces).
 */
567 aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
568 struct aesni_mb_session *sess,
569 const struct rte_crypto_sym_xform *xform)
571 switch (xform->aead.op) {
572 case RTE_CRYPTO_AEAD_OP_ENCRYPT:
573 sess->cipher.direction = IMB_DIR_ENCRYPT;
574 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
576 case RTE_CRYPTO_AEAD_OP_DECRYPT:
577 sess->cipher.direction = IMB_DIR_DECRYPT;
578 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
581 IPSEC_MB_LOG(ERR, "Invalid aead operation parameter");
585 /* Set IV parameters */
586 sess->iv.offset = xform->aead.iv.offset;
587 sess->iv.length = xform->aead.iv.length;
589 /* Set digest sizes */
590 sess->auth.req_digest_len = xform->aead.digest_length;
591 sess->auth.gen_digest_len = sess->auth.req_digest_len;
593 switch (xform->aead.algo) {
594 case RTE_CRYPTO_AEAD_AES_CCM:
595 sess->cipher.mode = IMB_CIPHER_CCM;
596 sess->auth.algo = IMB_AUTH_AES_CCM;
598 /* Check key length and choose key expansion function for AES */
599 switch (xform->aead.key.length) {
600 case IMB_KEY_128_BYTES:
601 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
602 IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
603 sess->cipher.expanded_aes_keys.encode,
604 sess->cipher.expanded_aes_keys.decode);
606 case IMB_KEY_256_BYTES:
607 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
608 IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
609 sess->cipher.expanded_aes_keys.encode,
610 sess->cipher.expanded_aes_keys.decode);
613 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
617 /* CCM digests must be between 4 and 16 and an even number */
618 if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
619 sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
620 (sess->auth.req_digest_len & 1) == 1) {
621 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
626 case RTE_CRYPTO_AEAD_AES_GCM:
627 sess->cipher.mode = IMB_CIPHER_GCM;
628 sess->auth.algo = IMB_AUTH_AES_GMAC;
/* GCM pre-processing expands key into hash subkey material */
630 switch (xform->aead.key.length) {
631 case IMB_KEY_128_BYTES:
632 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
633 IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
634 &sess->cipher.gcm_key);
636 case IMB_KEY_192_BYTES:
637 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
638 IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
639 &sess->cipher.gcm_key);
641 case IMB_KEY_256_BYTES:
642 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
643 IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
644 &sess->cipher.gcm_key);
647 IPSEC_MB_LOG(ERR, "Invalid cipher key length");
651 /* GCM digest size must be between 1 and 16 */
652 if (sess->auth.req_digest_len == 0 ||
653 sess->auth.req_digest_len > 16) {
654 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
/* CHACHA20-POLY1305: raw 32-byte key, fixed 16-byte tag */
659 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
660 sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
661 sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;
663 if (xform->aead.key.length != 32) {
664 IPSEC_MB_LOG(ERR, "Invalid key length");
667 sess->cipher.key_length_in_bytes = 32;
/* Key is stored raw in the expanded_aes_keys union member */
668 memcpy(sess->cipher.expanded_aes_keys.encode,
669 xform->aead.key.data, 32);
670 if (sess->auth.req_digest_len != 16) {
671 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
676 IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter");
683 /** Configure a aesni multi-buffer session from a crypto xform chain */
/*
 * Top-level session setup: parse the xform chain, pick the
 * cipher/hash chain order for the operation mode, then delegate to
 * the auth / cipher / aead parameter helpers as required.
 * NOTE(review): this extract elides lines (the priv_sess parameter,
 * returns, breaks and braces are not all visible).
 */
685 aesni_mb_session_configure(IMB_MGR *mb_mgr,
687 const struct rte_crypto_sym_xform *xform)
689 const struct rte_crypto_sym_xform *auth_xform = NULL;
690 const struct rte_crypto_sym_xform *cipher_xform = NULL;
691 const struct rte_crypto_sym_xform *aead_xform = NULL;
692 enum ipsec_mb_operation mode;
693 struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess;
/* Split the xform chain into auth/cipher/aead components */
696 ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
697 &cipher_xform, &aead_xform);
701 /* Select Crypto operation - hash then cipher / cipher then hash */
703 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
704 sess->chain_order = IMB_ORDER_HASH_CIPHER;
706 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
707 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
708 sess->chain_order = IMB_ORDER_CIPHER_HASH;
710 case IPSEC_MB_OP_HASH_GEN_ONLY:
711 case IPSEC_MB_OP_HASH_VERIFY_ONLY:
712 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
713 sess->chain_order = IMB_ORDER_HASH_CIPHER;
716 * Multi buffer library operates only at two modes,
717 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER.
718 * When doing ciphering only, chain order depends
719 * on cipher operation: encryption is always
720 * the first operation and decryption the last one.
722 case IPSEC_MB_OP_ENCRYPT_ONLY:
723 sess->chain_order = IMB_ORDER_CIPHER_HASH;
725 case IPSEC_MB_OP_DECRYPT_ONLY:
726 sess->chain_order = IMB_ORDER_HASH_CIPHER;
728 case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
729 sess->chain_order = IMB_ORDER_CIPHER_HASH;
730 sess->aead.aad_len = xform->aead.aad_length;
732 case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
733 sess->chain_order = IMB_ORDER_HASH_CIPHER;
734 sess->aead.aad_len = xform->aead.aad_length;
736 case IPSEC_MB_OP_NOT_SUPPORTED:
739 "Unsupported operation chain order parameter");
743 /* Default IV length = 0 */
745 sess->auth_iv.length = 0;
747 ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
750 "Invalid/unsupported authentication parameters");
754 ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
757 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
762 ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
766 "Invalid/unsupported aead parameters");
774 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
775 /** Check DOCSIS security session configuration is valid */
/*
 * Validate a DOCSIS security session configuration: the crypto xform
 * must be a single AES-DOCSISBPI cipher with a 128/256-bit key and a
 * full-block IV, with op direction matching the DOCSIS direction.
 * NOTE(review): returns and closing braces are elided in this extract.
 */
777 check_docsis_sec_session(struct rte_security_session_conf *conf)
779 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
780 struct rte_security_docsis_xform *docsis = &conf->docsis;
782 /* Downlink: CRC generate -> Cipher encrypt */
783 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
785 if (crypto_sym != NULL &&
786 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
787 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
788 crypto_sym->cipher.algo ==
789 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
790 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
791 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
792 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
793 crypto_sym->next == NULL) {
796 /* Uplink: Cipher decrypt -> CRC verify */
797 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
799 if (crypto_sym != NULL &&
800 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
801 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
802 crypto_sym->cipher.algo ==
803 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
804 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
805 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
806 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
807 crypto_sym->next == NULL) {
815 /** Set DOCSIS security session auth (CRC) parameters */
/*
 * Set the session auth (CRC32) parameters for a DOCSIS session:
 * CRC verify on uplink, CRC generate on downlink, Ethernet-CRC-sized
 * digest in both cases.
 */
817 aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
818 struct rte_security_docsis_xform *xform)
/* NOTE(review): the NULL-check condition line is elided here */
821 IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform");
825 /* Select CRC generate/verify */
826 if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
827 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
828 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
829 } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
830 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
831 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
833 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction");
/* DOCSIS digest is the 4-byte Ethernet CRC */
837 sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
838 sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
844 * Parse DOCSIS security session configuration and set private session
/*
 * Configure a DOCSIS security session: validate the configuration,
 * select chain order per direction, then program the CRC (auth) and
 * AES-DOCSISBPI cipher parameters into the private session.
 * NOTE(review): this extract elides lines (the sess parameter,
 * mb_mgr error check, returns and the free_mb_mgr cleanup path).
 */
848 aesni_mb_set_docsis_sec_session_parameters(
849 __rte_unused struct rte_cryptodev *dev,
850 struct rte_security_session_conf *conf,
/* Local MB manager used only for key expansion during setup */
853 IMB_MGR *mb_mgr = alloc_init_mb_mgr();
854 struct rte_security_docsis_xform *docsis_xform;
855 struct rte_crypto_sym_xform *cipher_xform;
856 struct aesni_mb_session *ipsec_sess = sess;
862 ret = check_docsis_sec_session(conf);
864 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
868 switch (conf->docsis.direction) {
869 case RTE_SECURITY_DOCSIS_UPLINK:
870 ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH;
871 docsis_xform = &conf->docsis;
872 cipher_xform = conf->crypto_xform;
874 case RTE_SECURITY_DOCSIS_DOWNLINK:
875 ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER;
876 cipher_xform = conf->crypto_xform;
877 docsis_xform = &conf->docsis;
880 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
885 /* Default IV length = 0 */
886 ipsec_sess->iv.length = 0;
888 ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess,
891 IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
895 ret = aesni_mb_set_session_cipher_parameters(mb_mgr,
896 ipsec_sess, cipher_xform);
899 IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
/*
 * Compute the hash start offset for out-of-place cipher-then-hash:
 * copies any auth-covered but non-ciphered head/tail regions from
 * src to dst so the digest over dst is correct, then returns the
 * (always non-negative, wrapped) src->dst distance that the
 * intel-ipsec-mb library expects.
 * NOTE(review): the "oop" parameter line is elided in this extract.
 */
909 static inline uint64_t
910 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
913 struct rte_mbuf *m_src, *m_dst;
914 uint8_t *p_src, *p_dst;
915 uintptr_t u_src, u_dst;
916 uint32_t cipher_end, auth_end;
918 /* Only cipher then hash needs special calculation. */
919 if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH)
920 return op->sym->auth.data.offset;
922 m_src = op->sym->m_src;
923 m_dst = op->sym->m_dst;
925 p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
926 p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
927 u_src = (uintptr_t)p_src;
/* Hash offset is expressed relative to the src base address */
928 u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
931 * Copy the content between cipher offset and auth offset for generating
934 if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
935 memcpy(p_dst + op->sym->auth.data.offset,
936 p_src + op->sym->auth.data.offset,
937 op->sym->cipher.data.offset -
938 op->sym->auth.data.offset);
941 * Copy the content between (cipher offset + length) and (auth offset +
942 * length) for generating correct digest
944 cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
945 auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
946 if (cipher_end < auth_end)
947 memcpy(p_dst + cipher_end, p_src + cipher_end,
948 auth_end - cipher_end);
951 * Since intel-ipsec-mb only supports positive values,
952 * we need to deduct the correct offset between src and dst.
/* Wrap modulo 2^64 when dst lies below src in the address space */
955 return u_src < u_dst ? (u_dst - u_src) :
956 (UINT64_MAX - u_src + u_dst + 1);
/*
 * Fill an IMB_JOB from a session for the CPU (synchronous) crypto
 * path: copies chain order, cipher/auth parameters, per-algorithm
 * keys and AAD, then sets digest, IV and data-length fields from the
 * flat buffer described by buf/len/sofs.
 * NOTE(review): this extract elides lines (breaks, braces, IV and
 * src pointer assignments).
 */
960 set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
961 union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
962 struct rte_crypto_va_iova_ptr *iv,
963 struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
965 /* Set crypto operation */
966 job->chain_order = session->chain_order;
968 /* Set cipher parameters */
969 job->cipher_direction = session->cipher.direction;
970 job->cipher_mode = session->cipher.mode;
972 job->key_len_in_bytes = session->cipher.key_length_in_bytes;
974 /* Set authentication parameters */
975 job->hash_alg = session->auth.algo;
978 switch (job->hash_alg) {
979 case IMB_AUTH_AES_XCBC:
980 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
981 job->u.XCBC._k2 = session->auth.xcbc.k2;
982 job->u.XCBC._k3 = session->auth.xcbc.k3;
984 job->enc_keys = session->cipher.expanded_aes_keys.encode;
985 job->dec_keys = session->cipher.expanded_aes_keys.decode;
988 case IMB_AUTH_AES_CCM:
/* CCM AAD: skip the 18-byte B0 block + length prefix in the buffer */
989 job->u.CCM.aad = (uint8_t *)aad->va + 18;
990 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
991 job->enc_keys = session->cipher.expanded_aes_keys.encode;
992 job->dec_keys = session->cipher.expanded_aes_keys.decode;
996 case IMB_AUTH_AES_CMAC:
997 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
998 job->u.CMAC._skey1 = session->auth.cmac.skey1;
999 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1000 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1001 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1004 case IMB_AUTH_AES_GMAC:
1005 if (session->cipher.mode == IMB_CIPHER_GCM) {
1006 job->u.GCM.aad = aad->va;
1007 job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
/* Plain GMAC: whole buffer is authenticated as AAD */
1010 job->u.GCM.aad = buf;
1011 job->u.GCM.aad_len_in_bytes = len;
1012 job->cipher_mode = IMB_CIPHER_GCM;
1014 job->enc_keys = &session->cipher.gcm_key;
1015 job->dec_keys = &session->cipher.gcm_key;
1018 case IMB_AUTH_CHACHA20_POLY1305:
1019 job->u.CHACHA20_POLY1305.aad = aad->va;
1020 job->u.CHACHA20_POLY1305.aad_len_in_bytes =
1021 session->aead.aad_len;
1022 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1023 job->dec_keys = session->cipher.expanded_aes_keys.encode;
/* Default: HMAC family - point at precomputed ipad/opad */
1026 job->u.HMAC._hashed_auth_key_xor_ipad =
1027 session->auth.pads.inner;
1028 job->u.HMAC._hashed_auth_key_xor_opad =
1029 session->auth.pads.outer;
1031 if (job->cipher_mode == IMB_CIPHER_DES3) {
1032 job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
1033 job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
1035 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1036 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1041 * Multi-buffer library current only support returning a truncated
1042 * digest length as specified in the relevant IPsec RFCs
1045 /* Set digest location and length */
1046 job->auth_tag_output = digest;
1047 job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1049 /* Set IV parameters */
1050 job->iv_len_in_bytes = session->iv.length;
1052 /* Data Parameters */
1054 job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
1055 job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
1056 job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
/* Plain GMAC hashes via the AAD pointer, so msg lengths are zero */
1057 if (job->hash_alg == IMB_AUTH_AES_GMAC &&
1058 session->cipher.mode != IMB_CIPHER_GCM) {
1059 job->msg_len_to_hash_in_bytes = 0;
1060 job->msg_len_to_cipher_in_bytes = 0;
1062 job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
1064 job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
1065 sofs.ofs.cipher.tail;
1068 job->user_data = udata;
1072 * Process a crypto operation and complete a IMB_JOB job structure for
1073 * submission to the multi buffer library for processing.
1075 * @param qp queue pair
1076 * @param job IMB_JOB structure to fill
1077 * @param op crypto op to process
1078 * @param digest_idx ID for digest to use
1081 * - 0 on success, the IMB_JOB will be filled
1082 * - -1 if invalid session, IMB_JOB will not be filled
1085 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
1086 struct rte_crypto_op *op, uint8_t *digest_idx)
1088 struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
1089 struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
1090 struct aesni_mb_session *session;
1091 uint32_t m_offset, oop;
1093 session = ipsec_mb_get_session_private(qp, op);
1094 if (session == NULL) {
1095 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1099 /* Set crypto operation */
1100 job->chain_order = session->chain_order;
1102 /* Set cipher parameters */
1103 job->cipher_direction = session->cipher.direction;
1104 job->cipher_mode = session->cipher.mode;
1106 job->key_len_in_bytes = session->cipher.key_length_in_bytes;
1108 /* Set authentication parameters */
1109 job->hash_alg = session->auth.algo;
1111 const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);
1113 switch (job->hash_alg) {
1114 case IMB_AUTH_AES_XCBC:
1115 job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
1116 job->u.XCBC._k2 = session->auth.xcbc.k2;
1117 job->u.XCBC._k3 = session->auth.xcbc.k3;
1119 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1120 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1123 case IMB_AUTH_AES_CCM:
1124 job->u.CCM.aad = op->sym->aead.aad.data + 18;
1125 job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
1126 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1127 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1130 case IMB_AUTH_AES_CMAC:
1131 job->u.CMAC._key_expanded = session->auth.cmac.expkey;
1132 job->u.CMAC._skey1 = session->auth.cmac.skey1;
1133 job->u.CMAC._skey2 = session->auth.cmac.skey2;
1134 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1135 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1138 case IMB_AUTH_AES_GMAC:
1139 if (session->cipher.mode == IMB_CIPHER_GCM) {
1140 job->u.GCM.aad = op->sym->aead.aad.data;
1141 job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
1144 job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
1145 uint8_t *, op->sym->auth.data.offset);
1146 job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
1147 job->cipher_mode = IMB_CIPHER_GCM;
1149 job->enc_keys = &session->cipher.gcm_key;
1150 job->dec_keys = &session->cipher.gcm_key;
1152 case IMB_AUTH_ZUC_EIA3_BITLEN:
1153 job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
1154 job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1155 session->auth_iv.offset);
1157 case IMB_AUTH_SNOW3G_UIA2_BITLEN:
1158 job->u.SNOW3G_UIA2._key = (void *)
1159 &session->auth.pKeySched_snow3g_auth;
1160 job->u.SNOW3G_UIA2._iv =
1161 rte_crypto_op_ctod_offset(op, uint8_t *,
1162 session->auth_iv.offset);
1164 case IMB_AUTH_KASUMI_UIA1:
1165 job->u.KASUMI_UIA1._key = (void *)
1166 &session->auth.pKeySched_kasumi_auth;
1168 case IMB_AUTH_CHACHA20_POLY1305:
1169 job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
1170 job->u.CHACHA20_POLY1305.aad_len_in_bytes =
1171 session->aead.aad_len;
1172 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1173 job->dec_keys = session->cipher.expanded_aes_keys.encode;
1176 job->u.HMAC._hashed_auth_key_xor_ipad =
1177 session->auth.pads.inner;
1178 job->u.HMAC._hashed_auth_key_xor_opad =
1179 session->auth.pads.outer;
1181 if (job->cipher_mode == IMB_CIPHER_DES3) {
1182 job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
1183 job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
1185 job->enc_keys = session->cipher.expanded_aes_keys.encode;
1186 job->dec_keys = session->cipher.expanded_aes_keys.decode;
1191 m_offset = op->sym->aead.data.offset;
1193 m_offset = op->sym->cipher.data.offset;
1195 if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
1196 job->enc_keys = session->cipher.zuc_cipher_key;
1197 job->dec_keys = session->cipher.zuc_cipher_key;
1198 } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
1199 job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
1201 } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
1202 job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
1206 if (!op->sym->m_dst) {
1207 /* in-place operation */
1210 } else if (op->sym->m_dst == op->sym->m_src) {
1211 /* in-place operation */
1215 /* out-of-place operation */
1216 m_dst = op->sym->m_dst;
1220 /* Set digest output location */
1221 if (job->hash_alg != IMB_AUTH_NULL &&
1222 session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1223 job->auth_tag_output = qp_data->temp_digests[*digest_idx];
1224 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
1227 job->auth_tag_output = op->sym->aead.digest.data;
1229 job->auth_tag_output = op->sym->auth.digest.data;
1231 if (session->auth.req_digest_len !=
1232 session->auth.gen_digest_len) {
1233 job->auth_tag_output =
1234 qp_data->temp_digests[*digest_idx];
1235 *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
1239 * Multi-buffer library current only support returning a truncated
1240 * digest length as specified in the relevant IPsec RFCs
1243 /* Set digest length */
1244 job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1246 /* Set IV parameters */
1247 job->iv_len_in_bytes = session->iv.length;
1249 /* Data Parameters */
1250 job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1251 job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
1253 switch (job->hash_alg) {
1254 case IMB_AUTH_AES_CCM:
1255 job->cipher_start_src_offset_in_bytes =
1256 op->sym->aead.data.offset;
1257 job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
1258 job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
1259 job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
1261 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1262 session->iv.offset + 1);
1265 case IMB_AUTH_AES_GMAC:
1266 if (session->cipher.mode == IMB_CIPHER_GCM) {
1267 job->cipher_start_src_offset_in_bytes =
1268 op->sym->aead.data.offset;
1269 job->hash_start_src_offset_in_bytes =
1270 op->sym->aead.data.offset;
1271 job->msg_len_to_cipher_in_bytes =
1272 op->sym->aead.data.length;
1273 job->msg_len_to_hash_in_bytes =
1274 op->sym->aead.data.length;
1276 job->cipher_start_src_offset_in_bytes =
1277 op->sym->auth.data.offset;
1278 job->hash_start_src_offset_in_bytes =
1279 op->sym->auth.data.offset;
1280 job->msg_len_to_cipher_in_bytes = 0;
1281 job->msg_len_to_hash_in_bytes = 0;
1284 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1285 session->iv.offset);
1288 case IMB_AUTH_CHACHA20_POLY1305:
1289 job->cipher_start_src_offset_in_bytes =
1290 op->sym->aead.data.offset;
1291 job->hash_start_src_offset_in_bytes =
1292 op->sym->aead.data.offset;
1293 job->msg_len_to_cipher_in_bytes =
1294 op->sym->aead.data.length;
1295 job->msg_len_to_hash_in_bytes =
1296 op->sym->aead.data.length;
1298 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1299 session->iv.offset);
1302 /* For SNOW3G, length and offsets are already in bits */
1303 job->cipher_start_src_offset_in_bytes =
1304 op->sym->cipher.data.offset;
1305 job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
1307 job->hash_start_src_offset_in_bytes = auth_start_offset(op,
1309 job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
1311 job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
1312 session->iv.offset);
1315 if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
1316 job->msg_len_to_cipher_in_bytes >>= 3;
1317 else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
1318 job->msg_len_to_hash_in_bytes >>= 3;
1320 /* Set user data to be crypto operation data struct */
1321 job->user_data = op;
1326 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/*
 * NOTE(review): this chunk is an extraction with dropped source lines (the
 * leading residual numbers are non-contiguous), so returns, braces and else
 * arms are missing below. Code text is kept byte-identical; comments only.
 */
1328  * Process a crypto operation containing a security op and complete a
1329  * IMB_JOB job structure for submission to the multi buffer library for
/* Builds an IMB_JOB for a DOCSIS security op (BPI cipher + CRC32 hash). */
1333 set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
1334 		struct rte_crypto_op *op, uint8_t *digest_idx)
1336 	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
1337 	struct rte_mbuf *m_src, *m_dst;
1338 	struct rte_crypto_sym_op *sym;
1339 	struct aesni_mb_session *session = NULL;
/* Reject anything that is not a security-session op (error return elided). */
1341 	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
1342 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1345 	session = (struct aesni_mb_session *)
1346 		get_sec_session_private_data(op->sym->sec_session);
1348 	if (unlikely(session == NULL)) {
1349 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1352 	/* Only DOCSIS protocol operations supported now */
1353 	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
1354 			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
1355 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* NOTE(review): sym/m_src/m_dst assignments fall in elided lines (presumably
 * sym = op->sym; m_src = sym->m_src; m_dst = m_src) — confirm upstream. */
1362 	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
1363 		/* in-place operation */
1366 		/* out-of-place operation not supported */
1367 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1371 	/* Set crypto operation */
1372 	job->chain_order = session->chain_order;
1374 	/* Set cipher parameters */
1375 	job->cipher_direction = session->cipher.direction;
1376 	job->cipher_mode = session->cipher.mode;
1378 	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
1379 	job->enc_keys = session->cipher.expanded_aes_keys.encode;
1380 	job->dec_keys = session->cipher.expanded_aes_keys.decode;
1382 	/* Set IV parameters */
1383 	job->iv_len_in_bytes = session->iv.length;
/* IV lives inside the rte_crypto_op private area at the session's offset. */
1384 	job->iv = (uint8_t *)op + session->iv.offset;
1386 	/* Set authentication parameters */
1387 	job->hash_alg = session->auth.algo;
1389 	/* Set digest output location */
/* CRC is always generated into a per-qp temp buffer; *digest_idx cycles
 * through IMB_MAX_JOBS slots so in-flight jobs don't share a buffer. */
1390 	job->auth_tag_output = qp_data->temp_digests[*digest_idx];
1391 	*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
1393 	/* Set digest length */
1394 	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
1396 	/* Set data parameters */
1397 	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
1398 	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
1399 			sym->cipher.data.offset);
1401 	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
1402 	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;
1404 	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
1405 	job->msg_len_to_hash_in_bytes = sym->auth.data.length;
/* Stash the op so post-processing can map the finished job back to it. */
1407 	job->user_data = op;
1413 verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status)
1415 uint16_t crc_offset;
1418 if (!job->msg_len_to_hash_in_bytes)
1421 crc_offset = job->hash_start_src_offset_in_bytes +
1422 job->msg_len_to_hash_in_bytes -
1423 job->cipher_start_src_offset_in_bytes;
1424 crc = job->dst + crc_offset;
1426 /* Verify CRC (at the end of the message) */
1427 if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
1428 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1433 verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status)
1435 /* Verify digest if required */
1436 if (memcmp(job->auth_tag_output, digest, len) != 0)
1437 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1441 generate_digest(IMB_JOB *job, struct rte_crypto_op *op,
1442 struct aesni_mb_session *sess)
1444 /* No extra copy needed */
1445 if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
1449 * This can only happen for HMAC, so only digest
1450 * for authentication algos is required
1452 memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1453 sess->auth.req_digest_len);
1457 * Process a completed job and return rte_mbuf which job processed
1459 * @param qp	Queue Pair to process
1460 * @param job	IMB_JOB job to process
1463 * - Returns processed crypto operation.
1464 * - Returns NULL on invalid job
/*
 * NOTE(review): extraction dropped lines here (non-contiguous residual
 * numbering): braces, breaks, the final return and some else arms are
 * missing. Code text kept byte-identical; comments only.
 */
1466 static inline struct rte_crypto_op *
1467 post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
1469 	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
1470 	struct aesni_mb_session *sess = NULL;
1471 	uint32_t driver_id = ipsec_mb_get_driver_id(
1472 						IPSEC_MB_PMD_TYPE_AESNI_MB);
1474 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1475 	uint8_t is_docsis_sec = 0;
/* Session lookup: security (DOCSIS) path vs. regular symmetric session. */
1477 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1479 		 * Assuming at this point that if it's a security type op, that
1480 		 * this is for DOCSIS
1483 		sess = get_sec_session_private_data(op->sym->sec_session);
1487 		sess = get_sym_session_private_data(op->sym->session,
1491 	if (unlikely(sess == NULL)) {
1492 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
/* Map IMB job status to op status; on verify ops compare the digest, on
 * generate ops copy a truncated digest if needed (see generate_digest). */
1496 	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
1497 		switch (job->status) {
1498 		case IMB_STATUS_COMPLETED:
1499 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1501 			if (job->hash_alg == IMB_AUTH_NULL)
1504 			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1505 				if (is_aead_algo(job->hash_alg,
1508 						op->sym->aead.digest.data,
1509 						sess->auth.req_digest_len,
1511 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1512 				else if (is_docsis_sec)
1513 					verify_docsis_sec_crc(job,
1518 						op->sym->auth.digest.data,
1519 						sess->auth.req_digest_len,
1522 				generate_digest(job, op, sess);
/* default/error case of the switch (case labels elided by extraction). */
1525 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1529 	/* Free session if a session-less crypto op */
1530 	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* Zeroize before returning to the pool so key material is not leaked. */
1531 		memset(sess, 0, sizeof(struct aesni_mb_session));
1532 		memset(op->sym->session, 0,
1533 			rte_cryptodev_sym_get_existing_header_session_size(
1535 		rte_mempool_put(qp->sess_mp_priv, sess);
1536 		rte_mempool_put(qp->sess_mp, op->sym->session);
1537 		op->sym->session = NULL;
1544 post_process_mb_sync_job(IMB_JOB *job)
1548 st = job->user_data;
1549 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG;
1553 * Process a completed IMB_JOB job and keep processing jobs until
1554 * get_completed_job return NULL
1556 * @param qp Queue Pair to process
1557 * @param mb_mgr IMB_MGR to use
1558 * @param job IMB_JOB job
1559 * @param ops crypto ops to fill
1560 * @param nb_ops number of crypto ops
1563 * - Number of processed jobs
1566 handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
1567 IMB_JOB *job, struct rte_crypto_op **ops,
1570 struct rte_crypto_op *op = NULL;
1571 uint16_t processed_jobs = 0;
1573 while (job != NULL) {
1574 op = post_process_mb_job(qp, job);
1577 ops[processed_jobs++] = op;
1578 qp->stats.dequeued_count++;
1580 qp->stats.dequeue_err_count++;
1583 if (processed_jobs == nb_ops)
1586 job = IMB_GET_COMPLETED_JOB(mb_mgr);
1589 return processed_jobs;
1592 static inline uint32_t
1593 handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
1597 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
1598 post_process_mb_sync_job(job);
1603 static inline uint32_t
1604 flush_mb_sync_mgr(IMB_MGR *mb_mgr)
1608 job = IMB_FLUSH_JOB(mb_mgr);
1609 return handle_completed_sync_jobs(job, mb_mgr);
1612 static inline uint16_t
1613 flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
1614 struct rte_crypto_op **ops, uint16_t nb_ops)
1616 int processed_ops = 0;
1618 /* Flush the remaining jobs */
1619 IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
1622 processed_ops += handle_completed_jobs(qp, mb_mgr, job,
1623 &ops[processed_ops], nb_ops - processed_ops);
1625 return processed_ops;
1628 static inline IMB_JOB *
1629 set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
1631 job->chain_order = IMB_ORDER_HASH_CIPHER;
1632 job->cipher_mode = IMB_CIPHER_NULL;
1633 job->hash_alg = IMB_AUTH_NULL;
1634 job->cipher_direction = IMB_DIR_DECRYPT;
1636 /* Set user data to be crypto operation data struct */
1637 job->user_data = op;
/*
 * Dequeue burst: pulls ops from the qp ingress ring, builds IMB jobs for
 * them, submits to the MB manager and collects completed ops into @ops.
 * NOTE(review): extraction dropped lines here (do/while braces, else arms,
 * the early return). Code text kept byte-identical; comments only.
 */
1643 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
1646 	struct ipsec_mb_qp *qp = queue_pair;
1647 	IMB_MGR *mb_mgr = qp->mb_mgr;
1648 	struct rte_crypto_op *op;
1650 	int retval, processed_jobs = 0;
1652 	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
/* Local copy of the per-qp temp-digest ring index; written back below. */
1655 	uint8_t digest_idx = qp->digest_idx;
1658 		/* Get next free mb job struct from mb manager */
1659 		job = IMB_GET_NEXT_JOB(mb_mgr);
1660 		if (unlikely(job == NULL)) {
1661 			/* if no free mb job structs we need to flush mb_mgr */
1662 			processed_jobs += flush_mb_mgr(qp, mb_mgr,
1663 					&ops[processed_jobs],
1664 					nb_ops - processed_jobs);
1666 			if (nb_ops == processed_jobs)
/* After a flush a job slot must be free again. */
1669 			job = IMB_GET_NEXT_JOB(mb_mgr);
1673 		 * Get next operation to process from ingress queue.
1674 		 * There is no need to return the job to the IMB_MGR
1675 		 * if there are no more operations to process, since the IMB_MGR
1676 		 * can use that pointer again in next get_next calls.
1678 		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
1682 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1683 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1684 			retval = set_sec_mb_job_params(job, qp, op,
/* else branch: regular (non-security) symmetric op path. */
1688 			retval = set_mb_job_params(job, qp, op,
1691 		if (unlikely(retval != 0)) {
1692 			qp->stats.dequeue_err_count++;
/* Downgrade the bad job to a no-op so the op still completes with error. */
1693 			set_job_null_op(job, op);
1696 		/* Submit job to multi-buffer for processing */
1697 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1698 		job = IMB_SUBMIT_JOB(mb_mgr);
1700 		job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
1703 		 * If submit returns a processed job then handle it,
1704 		 * before submitting subsequent jobs
1707 			processed_jobs += handle_completed_jobs(qp, mb_mgr,
1708 					job, &ops[processed_jobs],
1709 					nb_ops - processed_jobs);
1711 	} while (processed_jobs < nb_ops);
/* Persist the advanced digest index for the next burst. */
1713 	qp->digest_idx = digest_idx;
/* If nothing completed in-line, flush once to make forward progress. */
1715 	if (processed_jobs < 1)
1716 		processed_jobs += flush_mb_mgr(qp, mb_mgr,
1717 				&ops[processed_jobs],
1718 				nb_ops - processed_jobs);
1720 	return processed_jobs;
1725 ipsec_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
1729 for (i = 0; i != vec->num; ++i)
1730 vec->status[i] = err;
1734 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
1736 /* no multi-seg support with current AESNI-MB PMD */
1739 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
1744 static inline IMB_JOB *
1745 submit_sync_job(IMB_MGR *mb_mgr)
1747 #ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
1748 return IMB_SUBMIT_JOB(mb_mgr);
1750 return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
1754 static inline uint32_t
1755 generate_sync_dgst(struct rte_crypto_sym_vec *vec,
1756 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
1760 for (i = 0, k = 0; i != vec->num; i++) {
1761 if (vec->status[i] == 0) {
1762 memcpy(vec->digest[i].va, dgst[i], len);
1770 static inline uint32_t
1771 verify_sync_dgst(struct rte_crypto_sym_vec *vec,
1772 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
1776 for (i = 0, k = 0; i != vec->num; i++) {
1777 if (vec->status[i] == 0) {
1778 if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
1779 vec->status[i] = EBADMSG;
/*
 * CPU (synchronous) crypto path: processes a vector of buffers in-line on
 * the calling thread using a per-thread MB manager.
 * NOTE(review): extraction dropped lines here (declarations of mb_mgr, buf,
 * ret, job; braces; returns). Code text kept byte-identical; comments only.
 */
1789 aesni_mb_process_bulk(struct rte_cryptodev *dev,
1790 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
1791 	struct rte_crypto_sym_vec *vec)
1794 	uint32_t i, j, k, len;
1798 	struct aesni_mb_session *s;
/* VLA of per-op scratch digests; k counts successfully processed ops. */
1799 	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
1801 	s = get_sym_session_private_data(sess, dev->driver_id);
/* Invalid session: fail the whole vector (early-return lines elided). */
1803 		ipsec_mb_fill_error_code(vec, EINVAL);
1807 	/* get per-thread MB MGR, create one if needed */
1808 	mb_mgr = get_per_thread_mb_mgr();
1809 	if (unlikely(mb_mgr == NULL))
1812 	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
1813 		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
1815 			vec->status[i] = ret;
1819 		buf = vec->src_sgl[i].vec[0].base;
1820 		len = vec->src_sgl[i].vec[0].len;
1822 		job = IMB_GET_NEXT_JOB(mb_mgr);
/* No free job slot: flush first, then a slot is guaranteed. */
1824 			k += flush_mb_sync_mgr(mb_mgr);
1825 			job = IMB_GET_NEXT_JOB(mb_mgr);
1826 			RTE_ASSERT(job != NULL);
1829 		/* Submit job for processing */
1830 		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
1831 			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
1832 		job = submit_sync_job(mb_mgr);
1835 		/* handle completed jobs */
1836 		k += handle_completed_sync_jobs(job, mb_mgr);
1839 	/* flush remaining jobs */
/* Conditional (presumably k != vec->num) elided by extraction. */
1841 		k += flush_mb_sync_mgr(mb_mgr);
1843 	/* finish processing for successful jobs: check/update digest */
1845 		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1846 			k = verify_sync_dgst(vec,
1847 				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
1848 				s->auth.req_digest_len);
/* else: generate path copies digests out to the application buffers. */
1850 			k = generate_sync_dgst(vec,
1851 				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
1852 				s->auth.req_digest_len);
1858 struct rte_cryptodev_ops aesni_mb_pmd_ops = {
1859 .dev_configure = ipsec_mb_config,
1860 .dev_start = ipsec_mb_start,
1861 .dev_stop = ipsec_mb_stop,
1862 .dev_close = ipsec_mb_close,
1864 .stats_get = ipsec_mb_stats_get,
1865 .stats_reset = ipsec_mb_stats_reset,
1867 .dev_infos_get = ipsec_mb_info_get,
1869 .queue_pair_setup = ipsec_mb_qp_setup,
1870 .queue_pair_release = ipsec_mb_qp_release,
1872 .sym_cpu_process = aesni_mb_process_bulk,
1874 .sym_session_get_size = ipsec_mb_sym_session_get_size,
1875 .sym_session_configure = ipsec_mb_sym_session_configure,
1876 .sym_session_clear = ipsec_mb_sym_session_clear
1879 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1881 * Configure a aesni multi-buffer session from a security session
1885 aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
1886 struct rte_security_session *sess,
1887 struct rte_mempool *mempool)
1889 void *sess_private_data;
1890 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1893 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
1894 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
1895 IPSEC_MB_LOG(ERR, "Invalid security protocol");
1899 if (rte_mempool_get(mempool, &sess_private_data)) {
1900 IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
1904 ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
1908 IPSEC_MB_LOG(ERR, "Failed to configure session parameters");
1910 /* Return session to mempool */
1911 rte_mempool_put(mempool, sess_private_data);
1915 set_sec_session_private_data(sess, sess_private_data);
1920 /** Clear the memory of session so it does not leave key material behind */
1922 aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
1923 struct rte_security_session *sess)
1925 void *sess_priv = get_sec_session_private_data(sess);
1928 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1930 memset(sess_priv, 0, sizeof(struct aesni_mb_session));
1931 set_sec_session_private_data(sess, NULL);
1932 rte_mempool_put(sess_mp, sess_priv);
1937 /** Get security capabilities for aesni multi-buffer */
1938 static const struct rte_security_capability *
1939 aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
1941 return aesni_mb_pmd_security_cap;
1944 static struct rte_security_ops aesni_mb_pmd_sec_ops = {
1945 .session_create = aesni_mb_pmd_sec_sess_create,
1946 .session_update = NULL,
1947 .session_stats_get = NULL,
1948 .session_destroy = aesni_mb_pmd_sec_sess_destroy,
1949 .set_pkt_metadata = NULL,
1950 .capabilities_get = aesni_mb_pmd_sec_capa_get
1953 struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;
1956 aesni_mb_configure_dev(struct rte_cryptodev *dev)
1958 struct rte_security_ctx *security_instance;
1960 security_instance = rte_malloc("aesni_mb_sec",
1961 sizeof(struct rte_security_ctx),
1962 RTE_CACHE_LINE_SIZE);
1963 if (security_instance != NULL) {
1964 security_instance->device = (void *)dev;
1965 security_instance->ops = rte_aesni_mb_pmd_sec_ops;
1966 security_instance->sess_cnt = 0;
1967 dev->security_ctx = security_instance;
1978 aesni_mb_probe(struct rte_vdev_device *vdev)
1980 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
1983 static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
1984 .probe = aesni_mb_probe,
1985 .remove = ipsec_mb_remove
1988 static struct cryptodev_driver aesni_mb_crypto_drv;
1990 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
1991 cryptodev_aesni_mb_pmd_drv);
1992 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
1993 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
1994 "max_nb_queue_pairs=<int> socket_id=<int>");
1995 RTE_PMD_REGISTER_CRYPTO_DRIVER(
1996 aesni_mb_crypto_drv,
1997 cryptodev_aesni_mb_pmd_drv.driver,
1998 pmd_driver_id_aesni_mb);
2000 /* Constructor function to register aesni-mb PMD */
2001 RTE_INIT(ipsec_mb_register_aesni_mb)
2003 struct ipsec_mb_internals *aesni_mb_data =
2004 &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
2006 aesni_mb_data->caps = aesni_mb_capabilities;
2007 aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst;
2008 aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2009 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2010 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
2011 RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
2012 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
2013 RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
2015 aesni_mb_data->internals_priv_size = 0;
2016 aesni_mb_data->ops = &aesni_mb_pmd_ops;
2017 aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
2018 aesni_mb_data->queue_pair_configure = NULL;
2019 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2020 aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops;
2021 aesni_mb_data->dev_config = aesni_mb_configure_dev;
2022 aesni_mb_data->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
2024 aesni_mb_data->session_configure = aesni_mb_session_configure;
2025 aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session);