/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <intel-ipsec-mb.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_aesni_mb_pmd_private.h"

#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#define HMAC_MAX_BLOCK_SIZE 128

static uint8_t cryptodev_driver_id;

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		const uint8_t *hkey, uint16_t hkey_len,

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
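
/*
 * Illustrative note (added for clarity, not upstream code): the two partial
 * hashes computed above follow RFC 2104, where
 *
 *	HMAC(key, msg) = H((key ^ opad) || H((key ^ ipad) || msg))
 *
 * Pre-hashing the single ipad/opad blocks once per session means the
 * multi-buffer library only has to resume the stored inner and outer hash
 * states for every packet instead of re-deriving them from the key each time.
 */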
/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;

#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/* CCM requires hashing first and ciphering later */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_HASH_CIPHER;

				return AESNI_MB_OP_AEAD_CIPHER_HASH;

			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;

				return AESNI_MB_OP_AEAD_HASH_CIPHER;
#else
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;

				return AESNI_MB_OP_AEAD_HASH_CIPHER;
#endif

	return AESNI_MB_OP_NOT_SUPPORTED;
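
/*
 * Illustrative sketch (added example, not part of the upstream driver): a
 * caller-built xform chain that the helper above would classify as
 * AESNI_MB_OP_CIPHER_HASH - AES-128-CBC encryption chained with SHA1-HMAC
 * digest generation.  The key/IV sizes and the example_* name are assumptions
 * made for this example only.
 */
static __rte_unused void
example_build_cipher_hash_chain(struct rte_crypto_sym_xform *cipher_xf,
		struct rte_crypto_sym_xform *auth_xf,
		uint8_t *cipher_key, uint8_t *auth_key)
{
	memset(cipher_xf, 0, sizeof(*cipher_xf));
	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xf->cipher.key.data = cipher_key;
	cipher_xf->cipher.key.length = 16;
	/* IV is expected immediately after the crypto op structure */
	cipher_xf->cipher.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	cipher_xf->cipher.iv.length = 16;
	cipher_xf->next = auth_xf;	/* cipher first, then hash */

	memset(auth_xf, 0, sizeof(*auth_xf));
	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xf->auth.key.data = auth_key;
	auth_xf->auth.key.length = 20;
	auth_xf->auth.digest_length = 12;
	auth_xf->next = NULL;
}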
/** Set session authentication parameters */
aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	hash_one_block_t hash_oneblock_fn = NULL;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
	uint32_t auth_precompute = 1;

		sess->auth.algo = NULL_HASH;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		sess->auth.algo = AES_CMAC;

		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes,
		 * in version 0.49.
		 * If size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			sess->cipher.direction = ENCRYPT;
			sess->chain_order = CIPHER_HASH;
			sess->cipher.direction = DECRYPT;

		sess->auth.algo = AES_GMAC;
		/*
		 * Multi-buffer lib supports 8, 12 and 16 bytes of digest.
		 * If size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len != 16 &&
				sess->auth.req_digest_len != 12 &&
				sess->auth.req_digest_len != 8) {
			sess->auth.gen_digest_len = 16;
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		sess->iv.length = xform->auth.iv.length;
		sess->iv.offset = xform->auth.iv.offset;

		switch (xform->auth.key.length) {
			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			AESNI_MB_LOG(ERR, "Invalid authentication key length\n");
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
				xform->auth.key.data,
				xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA1:
		sess->auth.algo = PLAIN_SHA1;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
				xform->auth.key.data,
				xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA224:
		sess->auth.algo = PLAIN_SHA_224;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
				xform->auth.key.data,
				xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA256:
		sess->auth.algo = PLAIN_SHA_256;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
				xform->auth.key.data,
				xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA384:
		sess->auth.algo = PLAIN_SHA_384;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
				xform->auth.key.data,
				xform->auth.key.length,
			key_larger_block_size = 1;
	case RTE_CRYPTO_AUTH_SHA512:
		sess->auth.algo = PLAIN_SHA_512;
		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");

	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

	/* Plain SHA does not require precompute key */
	if (auth_precompute == 0)

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
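
/*
 * Illustrative sketch (added example, not part of the upstream driver): an
 * auth-only transform for AES-GMAC as handled by the GMAC branch above.  The
 * 16-byte key, 12-byte IV and 16-byte digest are example values; the
 * example_* helper name is hypothetical.
 */
static __rte_unused void
example_build_gmac_auth_xform(struct rte_crypto_sym_xform *auth_xf,
		uint8_t *gmac_key)
{
	memset(auth_xf, 0, sizeof(*auth_xf));
	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
	auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xf->auth.key.data = gmac_key;
	auth_xf->auth.key.length = 16;	/* maps to AES_128_BYTES above */
	auth_xf->auth.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	auth_xf->auth.iv.length = 12;
	auth_xf->auth.digest_length = 16;
	auth_xf->next = NULL;
}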
/** Set session cipher parameters */
aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
		sess->cipher.mode = NULL_CIPHER;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = DES3;
		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
		switch (xform->cipher.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);

			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			AESNI_MB_LOG(ERR, "Invalid cipher key length");

		sess->cipher.key_length_in_bytes = 24;

		if (xform->cipher.key.length != 8) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
			xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
			xform->cipher.key.data);
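
/*
 * Illustrative sketch (added example, not part of the upstream driver): a
 * cipher-only transform for 3DES-CBC with a 24-byte K1||K2||K3 key, which
 * the key-schedule code above expands into three DES key schedules.  The
 * example_* helper name is hypothetical.
 */
static __rte_unused void
example_build_3des_cbc_xform(struct rte_crypto_sym_xform *cipher_xf,
		uint8_t *key_ede)
{
	memset(cipher_xf, 0, sizeof(*cipher_xf));
	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->cipher.algo = RTE_CRYPTO_CIPHER_3DES_CBC;
	cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	cipher_xf->cipher.key.data = key_ede;
	cipher_xf->cipher.key.length = 24;	/* K1 || K2 || K3 */
	cipher_xf->cipher.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	cipher_xf->cipher.iv.length = 8;	/* DES block size */
	cipher_xf->next = NULL;
}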
aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = CCM;
		sess->auth.algo = AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = GCM;
		sess->auth.algo = AES_GMAC;

		switch (xform->aead.key.length) {
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
					&sess->cipher.gcm_key);
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
					&sess->cipher.gcm_key);
			AESNI_MB_LOG(ERR, "Invalid cipher key length");

		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	sess->auth.req_digest_len = xform->aead.digest_length;
	/* CCM digests must be between 4 and 16 and an even number */
	if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
			(sess->auth.req_digest_len & 1) == 1) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
	sess->auth.gen_digest_len = sess->auth.req_digest_len;
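
/*
 * Illustrative sketch (added example, not part of the upstream driver): an
 * AEAD transform for AES-128-GCM matching the GCM branch above.  The 12-byte
 * IV and 16-byte tag are the usual GCM parameters; the AAD length and the
 * example_* helper name are assumptions for this example.
 */
static __rte_unused void
example_build_gcm_aead_xform(struct rte_crypto_sym_xform *aead_xf,
		uint8_t *gcm_key)
{
	memset(aead_xf, 0, sizeof(*aead_xf));
	aead_xf->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	aead_xf->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	aead_xf->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	aead_xf->aead.key.data = gcm_key;
	aead_xf->aead.key.length = 16;
	aead_xf->aead.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	aead_xf->aead.iv.length = 12;
	aead_xf->aead.digest_length = 16;
	aead_xf->aead.aad_length = 16;
	aead_xf->next = NULL;
}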
/** Parse crypto xform chain and set private session parameters */
aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		cipher_xform = xform->next;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * Multi-buffer library operates only in two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * chain order depends on cipher operation: encryption is always
		 * the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
			sess->chain_order = HASH_CIPHER;

		cipher_xform = xform;
	case AESNI_MB_OP_AEAD_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
	case AESNI_MB_OP_AEAD_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
	case AESNI_MB_OP_NOT_SUPPORTED:
		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");

	/* Default IV length = 0 */

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");

	ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
		AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
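
/*
 * Illustrative sketch (added example, not part of the upstream driver): how
 * an application typically reaches the parameter parsing above - it creates
 * a session from a mempool and initialises it with an xform chain, which the
 * cryptodev layer forwards to this PMD.  The exact session API differs
 * between DPDK releases; this follows the split session/private mempool
 * scheme assumed elsewhere in this file, and the example_* name is
 * hypothetical.
 */
static __rte_unused struct rte_cryptodev_sym_session *
example_create_session(uint8_t dev_id, struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *sess_mp, struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;

	/* Let the device-specific configure path parse the xform chain */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xforms,
			sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}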
/**
 * burst enqueue, place crypto operations on ingress queue for processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * - Number of crypto operations enqueued
 */
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;

/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_sym_session_private_data(
					cryptodev_driver_id);
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only cipher then hash needs special calculation. */
	if (!oop || session->chain_order != CIPHER_HASH)
		return op->sym->auth.data.offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
	/*
	 * Copy the content between cipher offset and auth offset for generating
	 * the correct digest.
	 */
	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
		memcpy(p_dst + op->sym->auth.data.offset,
				p_src + op->sym->auth.data.offset,
				op->sym->cipher.data.offset -
				op->sym->auth.data.offset);
	/*
	 * Copy the content between (cipher offset + length) and (auth offset +
	 * length) for generating correct digest
	 */
	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);
	/*
	 * Since intel-ipsec-mb only supports positive values, the (possibly
	 * negative) offset between src and dst is encoded as an unsigned
	 * wrap-around value.
	 */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
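
/*
 * Worked example (added for clarity): with u_src = 0x2000 and u_dst = 0x1000
 * the helper above returns UINT64_MAX - 0x2000 + 0x1000 + 1, i.e. the
 * two's-complement encoding of -0x1000, so the library's unsigned
 * "source + offset" arithmetic wraps around to the lower destination address.
 */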
/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	qp	queue pair
 * @param	job	JOB_AES_HMAC structure to fill
 * @param	m	mbuf to process
 *
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

		if (session->cipher.mode == GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = GCM;
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;

		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;

	if (!op->sym->m_dst) {
		/* in-place operation */
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		/* out-of-place operation */
		m_dst = op->sym->m_dst;

	if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
			session->cipher.mode == GCM))
		m_offset = op->sym->aead.data.offset;
		m_offset = op->sym->cipher.data.offset;

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
		if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
				session->cipher.mode == GCM))
			job->auth_tag_output = op->sym->aead.digest.data;
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
			job->auth_tag_output = qp->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
	/*
	 * Multi-buffer library currently only supports returning a truncated
	 * digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);

		if (session->cipher.mode == GCM) {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes =
					op->sym->aead.data.length;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
			job->cipher_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->hash_start_src_offset_in_bytes =
					op->sym->auth.data.offset;
			job->msg_len_to_cipher_in_bytes = 0;
			job->msg_len_to_hash_in_bytes = 0;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);

	/* Set user data to be crypto operation data struct */
	job->user_data = op;
verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
	/* Verify digest if required */
	if (memcmp(job->auth_tag_output, digest, len) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
		struct aesni_mb_session *sess)
	/* No extra copy needed */
	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))

	/*
	 * This can only happen for HMAC, so only digest
	 * for authentication algos is required
	 */
	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
			sess->auth.req_digest_len);
/**
 * Process a completed job and return the crypto operation which the job
 * processed.
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * - Returns processed crypto operation.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = get_sym_session_private_data(
			cryptodev_driver_id);

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg == NULL_HASH)

			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
				if (job->hash_alg == AES_CCM ||
						(job->hash_alg == AES_GMAC &&
						sess->cipher.mode == GCM))
						op->sym->aead.digest.data,
						sess->auth.req_digest_len,
						op->sym->auth.digest.data,
						sess->auth.req_digest_len,
				generate_digest(job, op, sess);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
		rte_mempool_put(qp->sess_mp_priv, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 *
 * - Number of processed jobs
 */
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
			qp->stats.dequeue_err_count++;

		if (processed_jobs == nb_ops)

		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);

	return processed_jobs;

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);

		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;

static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))

	uint8_t digest_idx = qp->digest_idx;

		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)

			job = IMB_GET_NEXT_JOB(qp->mb_mgr);

		/*
		 * Get next operation to process from ingress queue.
		 * There is no need to return the job to the MB_MGR
		 * if there are no more operations to process, since the MB_MGR
		 * can use that pointer again in next get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);

		retval = set_mb_job_params(job, qp, op, &digest_idx);
		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(qp->mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
#endif
		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
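
/*
 * Illustrative sketch (added example, not part of the upstream driver): the
 * application-side view of the two burst functions above - enqueue a burst
 * of prepared crypto operations and poll the same queue pair until they all
 * complete.  dev_id/qp_id are whatever the application configured earlier;
 * the example_* name is hypothetical.
 */
static __rte_unused uint16_t
example_process_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t nb_enq, nb_deq = 0;

	/* Hand the operations to the PMD's ingress ring */
	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	/* Poll until every enqueued operation has been processed */
	while (nb_deq < nb_enq)
		nb_deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				&ops[nb_deq], nb_enq - nb_deq);

	return nb_deq;
}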
static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

cryptodev_aesni_mb_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_cryptodev_pmd_init_params *init_params)
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	mb_mgr = alloc_mb_mgr(0);

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(mb_mgr);
	case RTE_AESNI_MB_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(mb_mgr);
	case RTE_AESNI_MB_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		init_mb_mgr_avx2(mb_mgr);
	case RTE_AESNI_MB_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		init_mb_mgr_avx512(mb_mgr);
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mb_mgr = mb_mgr;

	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());

		free_mb_mgr(mb_mgr);

	rte_cryptodev_pmd_destroy(dev);
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
	struct rte_cryptodev_pmd_init_params init_params = {
		sizeof(struct aesni_mb_private),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	const char *name, *args;

	name = rte_vdev_device_name(vdev);

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
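
/*
 * Illustrative sketch (added example, not part of the upstream driver):
 * instantiating this PMD from application code instead of the EAL --vdev
 * option.  The argument string is an example; it follows the
 * max_nb_queue_pairs/socket_id parameters parsed above.
 */
static __rte_unused int
example_probe_aesni_mb_vdev(void)
{
	return rte_vdev_init("crypto_aesni_mb",
			"max_nb_queue_pairs=2,socket_id=0");
}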
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
	struct rte_cryptodev *cryptodev;
	struct aesni_mb_private *internals;

	name = rte_vdev_device_name(vdev);

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);

	return rte_cryptodev_pmd_destroy(cryptodev);

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv.driver,
		cryptodev_driver_id);

RTE_INIT(aesni_mb_init_log)
	aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");