/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <intel-ipsec-mb.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_aesni_mb_pmd_private.h"

#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#define HMAC_MAX_BLOCK_SIZE 128

static uint8_t cryptodev_driver_id;

typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
		void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Setup inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}
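
/*
 * The two one-block hashes above implement the standard HMAC
 * optimisation from RFC 2104: hashing (key XOR ipad) and (key XOR opad)
 * once per session avoids recomputing these fixed first blocks for
 * every packet; the multi-buffer library resumes from the stored
 * intermediate states.
 */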

/** Get xform chain order */
static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return AESNI_MB_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return AESNI_MB_OP_CIPHER_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return AESNI_MB_OP_CIPHER_HASH;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return AESNI_MB_OP_HASH_ONLY;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return AESNI_MB_OP_HASH_CIPHER;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
				return AESNI_MB_OP_AEAD_CIPHER_HASH;
			else
				return AESNI_MB_OP_AEAD_HASH_CIPHER;
		}
	}

	return AESNI_MB_OP_NOT_SUPPORTED;
}
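
/*
 * Note: jobs carry a cipher stage and a hash stage rather than a
 * dedicated AEAD type, so CCM and GCM are mapped onto the two chain
 * orders by direction: encrypt runs the cipher first, decrypt
 * authenticates first.
 */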

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn;
	unsigned int key_larger_block_size = 0;
	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };

	if (xform == NULL) {
		sess->auth.algo = NULL_HASH;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
		return -1;
	}

	/* Set the request digest size */
	sess->auth.req_digest_len = xform->auth.digest_length;

	/* Select auth generate/verify */
	sess->auth.operation = xform->auth.op;

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;

		uint16_t xcbc_mac_digest_len =
			get_truncated_digest_byte_length(AES_XCBC);
		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
		uint32_t dust[4*15];

		sess->auth.algo = AES_CMAC;

		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);

		if (sess->auth.req_digest_len > cmac_digest_len) {
			AESNI_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		/*
		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
		 * in version 0.50 and sizes of 12 and 16 bytes,
		 * in version 0.49.
		 * If size requested is different, generate the full digest
		 * (16 bytes) in a temporary location and then memcpy
		 * the requested number of bytes.
		 */
		if (sess->auth.req_digest_len < 4)
			sess->auth.gen_digest_len = cmac_digest_len;
		else
			sess->auth.gen_digest_len = sess->auth.req_digest_len;

		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
				sess->auth.cmac.expkey, dust);
		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
		return 0;
	}

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_mgr->md5_one_block;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_mgr->sha1_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
			IMB_SHA1(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_mgr->sha224_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
			IMB_SHA224(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_mgr->sha256_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
			IMB_SHA256(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_mgr->sha384_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
			IMB_SHA384(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_mgr->sha512_one_block;
		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
			IMB_SHA512(mb_mgr,
				xform->auth.key.data,
				xform->auth.key.length,
				hashed_key);
			key_larger_block_size = 1;
		}
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
		return -ENOTSUP;
	}

	uint16_t trunc_digest_size =
			get_truncated_digest_byte_length(sess->auth.algo);
	uint16_t full_digest_size =
			get_digest_byte_length(sess->auth.algo);

	if (sess->auth.req_digest_len > full_digest_size ||
			sess->auth.req_digest_len == 0) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}

	if (sess->auth.req_digest_len != trunc_digest_size &&
			sess->auth.req_digest_len != full_digest_size)
		sess->auth.gen_digest_len = full_digest_size;
	else
		sess->auth.gen_digest_len = sess->auth.req_digest_len;

	/* Calculate Authentication precomputes */
	if (key_larger_block_size) {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			hashed_key,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	} else {
		calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));
	}

	return 0;
}
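
/*
 * Example of the req/gen digest split above: for SHA1-HMAC the IPsec
 * truncated length is 12 bytes and the full length is 20 bytes. A
 * request for, say, 16 bytes matches neither, so the full 20-byte
 * digest is generated into a scratch buffer and the first 16 bytes are
 * copied out later by generate_digest().
 */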

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;

	if (xform == NULL) {
		sess->cipher.mode = NULL_CIPHER;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_SEC_BPI;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = DES3;
		is_3DES = 1;
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Check key length and choose key expansion function for AES */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case 24:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			/* Initialize keys - 24 bytes: [K1-K2-K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);

			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->cipher.key_length_in_bytes = 24;
	} else {
		if (xform->cipher.key.length != 8) {
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 8;

		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
			xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
			xform->cipher.key.data);
	}

	return 0;
}
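
/*
 * Note the casts above: single DES has no dedicated key store in the
 * session, so its schedule is written into the expanded_aes_keys
 * buffers; the same schedule is stored for both directions.
 */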

/** Set session AEAD parameters */
static int
aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = CCM;
		sess->auth.algo = AES_CCM;

		/* Check key length and choose key expansion function for AES */
		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = GCM;
		sess->auth.algo = AES_GMAC;

		switch (xform->aead.key.length) {
		case AES_128_BYTES:
			sess->cipher.key_length_in_bytes = AES_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_192_BYTES:
			sess->cipher.key_length_in_bytes = AES_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case AES_256_BYTES:
			sess->cipher.key_length_in_bytes = AES_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			AESNI_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		break;

	default:
		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	sess->auth.req_digest_len = xform->aead.digest_length;
	/* CCM digests must be between 4 and 16 and an even number */
	if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
			(sess->auth.req_digest_len & 1) == 1) {
		AESNI_MB_LOG(ERR, "Invalid digest size\n");
		return -EINVAL;
	}
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	return 0;
}
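
/*
 * The even-length restriction above comes from CCM: RFC 3610 only
 * defines tag lengths of 4, 6, 8, 10, 12, 14 and 16 bytes. The same
 * bounds are applied to GCM digests here as well.
 */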

/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case AESNI_MB_OP_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case AESNI_MB_OP_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_HASH_ONLY:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = NULL;
		break;
	case AESNI_MB_OP_CIPHER_ONLY:
		/*
		 * The multi-buffer library operates in only two modes,
		 * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
		 * chain order depends on cipher operation: encryption is always
		 * the first operation and decryption the last one.
		 */
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			sess->chain_order = CIPHER_HASH;
		else
			sess->chain_order = HASH_CIPHER;
		auth_xform = NULL;
		cipher_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_AEAD_HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
		aead_xform = xform;
		break;
	case AESNI_MB_OP_NOT_SUPPORTED:
	default:
		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
			cipher_xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return ret;
	}

	if (aead_xform) {
		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
				aead_xform);
		if (ret != 0) {
			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
			return ret;
		}
	}

	return 0;
}
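
/*
 * For reference, a caller typically hands this function a chain built
 * like the (illustrative, not from this file) snippet below, which
 * yields AESNI_MB_OP_CIPHER_HASH for encryption:
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = hmac_key, .length = 20 },
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *
 * hmac_key, aes_key and IV_OFFSET are assumed to be defined by the
 * application.
 */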

/**
 * Burst enqueue: place crypto operations on the ingress queue for
 * processing.
 *
 * @param __qp		Queue Pair to process
 * @param ops		Crypto operations for processing
 * @param nb_ops	Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	qp->stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}
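
/*
 * Enqueue only stages operations on the software ring; no crypto work
 * happens here. The paired dequeue_burst below drains the ring, builds
 * multi-buffer jobs and runs them, so the cycles are spent on the lcore
 * calling dequeue.
 */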

/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct aesni_mb_session *)
					get_sym_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
	} else {
		void *_sess = NULL;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
			return NULL;

		sess = (struct aesni_mb_session *)_sess_private_data;

		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
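
/*
 * Session-less path above: two objects come from the queue pair
 * mempool, one for the generic rte_cryptodev_sym_session header (_sess)
 * and one for this driver's private session data (_sess_private_data).
 * Both are returned to the pool in post_process_mb_job() when the op
 * completes.
 */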

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param job		JOB_AES_HMAC structure to fill
 * @param qp		queue pair
 * @param op		crypto operation to process
 * @param digest_idx	rolling index into the queue pair's scratch digests
 *
 * @return
 * - 0 on success, with the job filled and ready for submission
 * - -1 on failure; op->status is set to the relevant error value
 */
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_session *session;
	uint16_t m_offset = 0;

	session = get_session(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	switch (job->hash_alg) {
	case AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CCM:
		/* CCM: the actual AAD begins 18 bytes into the supplied buffer */
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
		job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		break;

	case AES_GMAC:
		job->u.GCM.aad = op->sym->aead.aad.data;
		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		job->aes_enc_key_expanded = &session->cipher.gcm_key;
		job->aes_dec_key_expanded = &session->cipher.gcm_key;
		break;

	default:
		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;

		if (job->cipher_mode == DES3) {
			job->aes_enc_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
			job->aes_dec_key_expanded =
				session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->aes_enc_key_expanded =
				session->cipher.expanded_aes_keys.encode;
			job->aes_dec_key_expanded =
				session->cipher.expanded_aes_keys.decode;
		}
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
					"mbuf for source data");
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
		m_dst = m_src;
		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
			m_offset = op->sym->aead.data.offset;
		else
			m_offset = op->sym->cipher.data.offset;
	}

	/* Set digest output location */
	if (job->hash_alg != NULL_HASH &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
	} else {
		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
			job->auth_tag_output = op->sym->aead.digest.data;
		else
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
			job->auth_tag_output = qp->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
		}
	}
	/*
	 * The multi-buffer library currently only supports returning a
	 * truncated digest length as specified in the relevant IPsec RFCs.
	 */

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data Parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	switch (job->hash_alg) {
	case AES_CCM:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

		/* CCM: the nonce starts one byte into the IV field */
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case AES_GMAC:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

	default:
		job->cipher_start_src_offset_in_bytes =
				op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

		job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return 0;
}

static inline void
verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
		struct aesni_mb_session *sess)
{
	/* Verify digest if required */
	if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
		if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
				sess->auth.req_digest_len) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
				sess->auth.req_digest_len) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	}
}

static inline void
generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
		struct aesni_mb_session *sess)
{
	/* No extra copy needed */
	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
		return;

	/*
	 * This can only happen for HMAC, so only digest
	 * for authentication algos is required
	 */
	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
			sess->auth.req_digest_len);
}
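
/*
 * Digest handling summary: verification always runs against a
 * per-queue-pair scratch digest (qp->temp_digests), so the library
 * never writes to or compares against application memory directly; for
 * generation, a scratch digest is only used when the generated length
 * differs from the requested one, in which case generate_digest() above
 * copies out the truncated tag.
 */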

/**
 * Process a completed job and return the crypto operation it carried.
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns processed crypto operation.
 * - Returns NULL on invalid job
 */
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct aesni_mb_session *sess = get_sym_session_private_data(
						op->sym->session,
						cryptodev_driver_id);

	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
		switch (job->status) {
		case STS_COMPLETED:
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

			if (job->hash_alg != NULL_HASH) {
				if (sess->auth.operation ==
						RTE_CRYPTO_AUTH_OP_VERIFY)
					verify_digest(job, op, sess);
				else
					generate_digest(job, op, sess);
			}
			break;
		default:
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(sess, 0, sizeof(struct aesni_mb_session));
		memset(op->sym->session, 0,
				rte_cryptodev_sym_get_header_session_size());
		rte_mempool_put(qp->sess_mp, sess);
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp		Queue Pair to process
 * @param job		JOB_AES_HMAC job
 * @param ops		array to store processed operations
 * @param nb_ops	maximum number of operations to return
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job != NULL) {
		op = post_process_mb_job(qp, job);

		if (op) {
			ops[processed_jobs++] = op;
			qp->stats.dequeued_count++;
		} else {
			qp->stats.dequeue_err_count++;
			break;
		}

		if (processed_jobs == nb_ops)
			break;

		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
	}

	return processed_jobs;
}

static inline uint16_t
flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	int processed_ops = 0;

	/* Flush the remaining jobs */
	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);

	if (job)
		processed_ops += handle_completed_jobs(qp, job,
				&ops[processed_ops], nb_ops - processed_ops);

	return processed_ops;
}

static inline JOB_AES_HMAC *
set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
	job->chain_order = HASH_CIPHER;
	job->cipher_mode = NULL_CIPHER;
	job->hash_alg = NULL_HASH;
	job->cipher_direction = DECRYPT;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	return job;
}

static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	uint8_t digest_idx = qp->digest_idx;
	do {
		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
		}

		/*
		 * Get next operation to process from ingress queue.
		 * There is no need to return the job to the MB_MGR
		 * if there are no more operations to process, since the MB_MGR
		 * can use that pointer again in subsequent get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

		retval = set_mb_job_params(job, qp, op, &digest_idx);
		if (unlikely(retval != 0)) {
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(qp->mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
#endif
		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}
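
/*
 * Processing model: dequeue_burst drives all crypto work. Each loop
 * iteration pairs one op from the ingress ring with a free job slot;
 * the multi-buffer manager may hold submitted jobs back until enough
 * are queued to fill its vector lanes, so the trailing flush_mb_mgr()
 * forces completion when a burst is too small to fill the pipeline.
 */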

static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);

static int
cryptodev_aesni_mb_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;
	MB_MGR *mb_mgr;

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
		return -EFAULT;
	}

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
		return -ENODEV;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		vector_mode = RTE_AESNI_MB_AVX512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else
		vector_mode = RTE_AESNI_MB_SSE;

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI;

	mb_mgr = alloc_mb_mgr(0);
	if (mb_mgr == NULL)
		return -ENOMEM;

	switch (vector_mode) {
	case RTE_AESNI_MB_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		init_mb_mgr_avx2(mb_mgr);
		break;
	case RTE_AESNI_MB_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		init_mb_mgr_avx512(mb_mgr);
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
		goto error_exit;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mb_mgr = mb_mgr;

	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
			imb_get_version_str());

	return 0;

error_exit:
	if (mb_mgr)
		free_mb_mgr(mb_mgr);

	rte_cryptodev_pmd_destroy(dev);

	return -1;
}

static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct aesni_mb_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name, *args;
	int retval;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	args = rte_vdev_device_args(vdev);

	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
	if (retval) {
		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
				args);
		return -EINVAL;
	}

	return cryptodev_aesni_mb_create(name, vdev, &init_params);
}

static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	struct aesni_mb_private *internals;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = cryptodev_aesni_mb_probe,
	.remove = cryptodev_aesni_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
		cryptodev_aesni_mb_pmd_drv.driver,
		cryptodev_driver_id);
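
/*
 * Example (typical EAL usage, not taken from this file): the PMD can be
 * instantiated from the application command line with
 *
 *	--vdev "crypto_aesni_mb,max_nb_queue_pairs=8,socket_id=0"
 *
 * where crypto_aesni_mb is the value of CRYPTODEV_NAME_AESNI_MB_PMD and
 * the two arguments are parsed by rte_cryptodev_pmd_parse_input_args().
 */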

RTE_INIT(aesni_mb_init_log)
{
	aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
}