1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
7 #include <rte_string_fns.h>
8 #include <rte_common.h>
9 #include <rte_malloc.h>
10 #include <rte_cryptodev_pmd.h>
12 #include "aesni_mb_pmd_private.h"
/* Capability table advertised to applications via aesni_mb_pmd_info_get().
 * NOTE(review): this excerpt omits the key/digest/IV/AAD size ranges of every
 * entry; only the .op/.xform_type/.algo fields are visible here. Per-entry
 * comments below name the algorithm from the visible .algo enumerator. */
15 static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	/* MD5 HMAC */
17 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
19 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
21 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
	/* SHA1 HMAC */
38 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
40 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
42 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
	/* SHA1 (plain hash) */
59 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
61 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
63 .algo = RTE_CRYPTO_AUTH_SHA1,
	/* SHA224 HMAC */
80 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
82 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
84 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
	/* SHA224 (plain hash) */
101 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
103 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
105 .algo = RTE_CRYPTO_AUTH_SHA224,
	/* SHA256 HMAC */
122 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
124 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
126 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
	/* SHA256 (plain hash) */
143 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
145 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
147 .algo = RTE_CRYPTO_AUTH_SHA256,
	/* SHA384 HMAC */
164 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
166 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
168 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
	/* SHA384 (plain hash) */
185 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
187 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
189 .algo = RTE_CRYPTO_AUTH_SHA384,
	/* SHA512 HMAC */
206 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
208 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
210 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
	/* SHA512 (plain hash) */
227 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
229 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
231 .algo = RTE_CRYPTO_AUTH_SHA512,
247 { /* AES XCBC HMAC */
248 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
250 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
252 .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/* AES CBC cipher */
269 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
271 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
273 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
	/* AES CTR cipher */
289 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
291 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
293 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
308 { /* AES DOCSIS BPI */
309 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
311 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
313 .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
/* Conditional on intel-ipsec-mb library version >= 0.53.3; the exact scope
 * of this #if (which entries/fields it guards) is not visible in this
 * excerpt — TODO confirm against the full source. */
317 #if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
	/* DES CBC cipher */
334 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
336 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
338 .algo = RTE_CRYPTO_CIPHER_DES_CBC,
	/* 3DES CBC cipher */
354 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
356 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
358 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
373 { /* DES DOCSIS BPI */
374 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
376 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
378 .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
	/* AES CCM AEAD */
394 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
396 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
398 .algo = RTE_CRYPTO_AEAD_AES_CCM,
	/* AES CMAC auth */
424 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
426 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
428 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
	/* AES GCM AEAD */
445 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
447 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
449 .algo = RTE_CRYPTO_AEAD_AES_GCM,
474 { /* AES GMAC (AUTH) */
475 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
477 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
479 .algo = RTE_CRYPTO_AUTH_AES_GMAC,
/* Mandatory terminator for the capability list. */
499 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
503 /** Configure device */
/* cryptodev dev_configure callback; both parameters are unused, so this is a
 * no-op (body omitted in this excerpt — presumably `return 0;`, TODO confirm). */
505 aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
506 __rte_unused struct rte_cryptodev_config *config)
/* cryptodev dev_start callback; dev is unused, so this is a no-op
 * (return type and body omitted in this excerpt — TODO confirm). */
513 aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
/* cryptodev dev_stop callback; dev is unused, so this is a no-op
 * (return type and body omitted in this excerpt — TODO confirm). */
520 aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
/* cryptodev dev_close callback; dev is unused, so this is a no-op
 * (return type and body omitted in this excerpt — TODO confirm). */
526 aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
532 /** Get device statistics */
/* Accumulates the per-queue-pair counters of every configured queue pair
 * into *stats. NOTE(review): no NULL check on qp is visible before the
 * dereferences below — confirm in the full source (lines omitted here)
 * whether unconfigured queue-pair slots are skipped. */
534 aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
535 struct rte_cryptodev_stats *stats)
539 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
540 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
542 stats->enqueued_count += qp->stats.enqueued_count;
543 stats->dequeued_count += qp->stats.dequeued_count;
545 stats->enqueue_err_count += qp->stats.enqueue_err_count;
546 stats->dequeue_err_count += qp->stats.dequeue_err_count;
550 /** Reset device statistics */
/* Zeroes the stats structure of every queue pair on the device.
 * NOTE(review): as with stats_get, no visible NULL check on qp — confirm
 * against the omitted lines of the full source. */
552 aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
556 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
557 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
559 memset(&qp->stats, 0, sizeof(qp->stats));
564 /** Get device info */
/* Fills *dev_info with the driver id, feature flags, the static capability
 * table above, and the max queue-pair count from the PMD's private data.
 * Silently does nothing when dev_info is NULL. */
566 aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
567 struct rte_cryptodev_info *dev_info)
569 struct aesni_mb_private *internals = dev->data->dev_private;
571 if (dev_info != NULL) {
572 dev_info->driver_id = dev->driver_id;
573 dev_info->feature_flags = dev->feature_flags;
574 dev_info->capabilities = aesni_mb_pmd_capabilities;
575 dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
576 /* 0 means the PMD imposes no limit on the number of sessions */
577 dev_info->sym.max_nb_sessions = 0;
581 /** Release queue pair */
/* Tears down queue pair qp_id: looks up the qp's processed-ops ring by name
 * (presumably freed in lines omitted from this excerpt — TODO confirm),
 * frees the intel-ipsec-mb manager, and clears the device slot.
 * NOTE(review): freeing of the qp structure itself is not visible here —
 * verify rte_free(qp) exists in the full source. */
583 aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
585 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
586 struct rte_ring *r = NULL;
589 r = rte_ring_lookup(qp->name);
593 free_mb_mgr(qp->mb_mgr);
595 dev->data->queue_pairs[qp_id] = NULL;
600 /** set a unique name for the queue pair based on it's name, dev_id and qp_id */
/* Writes "aesni_mb_pmd_<dev_id>_qp_<qp_id>" into qp->name. The truncation
 * check below presumably returns an error (omitted line) — TODO confirm. */
602 aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
603 struct aesni_mb_qp *qp)
605 unsigned n = snprintf(qp->name, sizeof(qp->name),
606 "aesni_mb_pmd_%u_qp_%u",
607 dev->data->dev_id, qp->id);
/* snprintf returning >= buffer size means the name was truncated */
609 if (n >= sizeof(qp->name))
615 /** Create a ring to place processed operations on */
/* Returns the ring for this qp's processed ops: reuses an existing ring of
 * the same name if it is at least ring_size entries, fails if an existing
 * ring is too small (return paths omitted from this excerpt), otherwise
 * creates a new single-producer/single-consumer ring on socket_id. */
616 static struct rte_ring *
617 aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
618 unsigned int ring_size, int socket_id)
621 char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
/* strlcpy guarantees NUL-termination; n >= size signals truncation */
623 unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));
625 if (n >= sizeof(ring_name))
628 r = rte_ring_lookup(ring_name);
630 if (rte_ring_get_size(r) >= ring_size) {
631 AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
636 AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
/* SP/SC flags: each qp is used by a single lcore at a time */
641 return rte_ring_create(ring_name, ring_size, socket_id,
642 RING_F_SP_ENQ | RING_F_SC_DEQ);
645 /** Setup a queue pair */
/* Allocates and initializes queue pair qp_id: releases any previous qp in
 * the slot, allocates the qp structure on the requested socket, assigns a
 * unique name, allocates and initializes an intel-ipsec-mb manager for the
 * detected CPU vector mode, creates the ingress ring, and records the
 * session mempools. Error paths jump to qp_setup_cleanup (partially omitted
 * from this excerpt). */
647 aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
648 const struct rte_cryptodev_qp_conf *qp_conf,
651 struct aesni_mb_qp *qp = NULL;
652 struct aesni_mb_private *internals = dev->data->dev_private;
655 /* Free memory prior to re-allocation if needed. */
656 if (dev->data->queue_pairs[qp_id] != NULL)
657 aesni_mb_pmd_qp_release(dev, qp_id);
659 /* Allocate the queue pair data structure. */
660 qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
661 RTE_CACHE_LINE_SIZE, socket_id);
666 dev->data->queue_pairs[qp_id] = qp;
668 if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
669 goto qp_setup_cleanup;
672 qp->mb_mgr = alloc_mb_mgr(0);
673 if (qp->mb_mgr == NULL) {
675 goto qp_setup_cleanup;
/* Initialize the mb manager for the vector ISA detected at probe time.
 * NOTE(review): break statements between cases are omitted from this
 * excerpt — confirm each case breaks in the full source. */
678 switch (internals->vector_mode) {
679 case RTE_AESNI_MB_SSE:
680 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
681 init_mb_mgr_sse(qp->mb_mgr);
683 case RTE_AESNI_MB_AVX:
684 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
685 init_mb_mgr_avx(qp->mb_mgr);
687 case RTE_AESNI_MB_AVX2:
688 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
689 init_mb_mgr_avx2(qp->mb_mgr);
691 case RTE_AESNI_MB_AVX512:
692 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
693 init_mb_mgr_avx512(qp->mb_mgr);
696 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
697 internals->vector_mode);
698 goto qp_setup_cleanup;
701 qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
702 qp_conf->nb_descriptors, socket_id);
703 if (qp->ingress_queue == NULL) {
705 goto qp_setup_cleanup;
708 qp->sess_mp = qp_conf->mp_session;
709 qp->sess_mp_priv = qp_conf->mp_session_private;
711 memset(&qp->stats, 0, sizeof(qp->stats));
713 char mp_name[RTE_MEMPOOL_NAMESIZE];
/* Per-qp digest mempool name: "digest_mp_<dev_id>_<qp_id>" */
715 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
716 "digest_mp_%u_%u", dev->data->dev_id, qp_id);
/* cleanup path: release the mb manager allocated above (further cleanup
 * lines, e.g. freeing qp, are omitted from this excerpt) */
722 free_mb_mgr(qp->mb_mgr);
729 /** Returns the size of the aesni multi-buffer session structure */
/* Used by the cryptodev framework to size session mempool elements. */
731 aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
733 return sizeof(struct aesni_mb_session);
736 /** Configure a aesni multi-buffer session from a crypto xform chain */
/* Allocates session private data from the given mempool, parses the xform
 * chain into it via aesni_mb_set_session_parameters(), and attaches it to
 * sess under this driver's id. On parse failure the object is returned to
 * the mempool (error-return lines omitted from this excerpt). */
738 aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
739 struct rte_crypto_sym_xform *xform,
740 struct rte_cryptodev_sym_session *sess,
741 struct rte_mempool *mempool)
743 void *sess_private_data;
744 struct aesni_mb_private *internals = dev->data->dev_private;
747 if (unlikely(sess == NULL)) {
748 AESNI_MB_LOG(ERR, "invalid session struct");
/* rte_mempool_get() returns non-zero when the pool is exhausted */
752 if (rte_mempool_get(mempool, &sess_private_data)) {
754 "Couldn't get object from session mempool");
758 ret = aesni_mb_set_session_parameters(internals->mb_mgr,
759 sess_private_data, xform);
761 AESNI_MB_LOG(ERR, "failed configure session parameters");
763 /* Return session to mempool */
764 rte_mempool_put(mempool, sess_private_data);
768 set_sym_session_private_data(sess, dev->driver_id,
774 /** Clear the memory of session so it doesn't leave key material behind */
/* Looks up this driver's private session data, zeroes the whole structure
 * (wiping keys), detaches it from sess, and returns the object to its
 * originating mempool. The NULL check on sess_priv is presumably in the
 * omitted line between the lookup and the memset — TODO confirm. */
776 aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
777 struct rte_cryptodev_sym_session *sess)
779 uint8_t index = dev->driver_id;
780 void *sess_priv = get_sym_session_private_data(sess, index);
782 /* Zero out the whole structure */
784 memset(sess_priv, 0, sizeof(struct aesni_mb_session));
785 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
786 set_sym_session_private_data(sess, index, NULL);
787 rte_mempool_put(sess_mp, sess_priv);
/* cryptodev operations vtable wiring the callbacks above into the DPDK
 * cryptodev framework; exported to the PMD via rte_aesni_mb_pmd_ops. */
791 struct rte_cryptodev_ops aesni_mb_pmd_ops = {
792 .dev_configure = aesni_mb_pmd_config,
793 .dev_start = aesni_mb_pmd_start,
794 .dev_stop = aesni_mb_pmd_stop,
795 .dev_close = aesni_mb_pmd_close,
797 .stats_get = aesni_mb_pmd_stats_get,
798 .stats_reset = aesni_mb_pmd_stats_reset,
800 .dev_infos_get = aesni_mb_pmd_info_get,
802 .queue_pair_setup = aesni_mb_pmd_qp_setup,
803 .queue_pair_release = aesni_mb_pmd_qp_release,
/* synchronous CPU crypto path; handler defined elsewhere in this PMD */
805 .sym_cpu_process = aesni_mb_cpu_crypto_process_bulk,
807 .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
808 .sym_session_configure = aesni_mb_pmd_sym_session_configure,
809 .sym_session_clear = aesni_mb_pmd_sym_session_clear
812 struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;