/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <string.h>

#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_cryptodev_pmd.h>

#include "aesni_mb_pmd_private.h"

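/*
 * Symmetric crypto capabilities advertised through rte_cryptodev_info_get().
 * Each entry identifies an algorithm by op type, xform type and algorithm id;
 * the full capability definition additionally nests .sym/.auth/.cipher/.aead
 * descriptors carrying the supported block, key, digest, AAD and IV size
 * ranges, which are omitted from the abbreviated entries below.
 */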
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
    {    /* MD5 HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
    },
    {    /* SHA1 HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
    },
    {    /* SHA1 */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA1,
    },
    {    /* SHA224 HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
    },
    {    /* SHA224 */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA224,
    },
    {    /* SHA256 HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
    },
    {    /* SHA256 */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA256,
    },
    {    /* SHA384 HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
    },
    {    /* SHA384 */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA384,
    },
    {    /* SHA512 HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
    },
    {    /* SHA512 */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SHA512,
    },
    {    /* AES XCBC HMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
    },
    {    /* AES CBC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
    },
    {    /* AES CTR */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_AES_CTR,
    },
    {    /* AES DOCSIS BPI */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
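        /*
         * With intel-ipsec-mb >= 0.53.3 the full entry also advertises
         * 256-bit DOCSIS keys (.key_size.max = 32) under this guard.
         */
#endif
    },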
    {    /* DES CBC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_DES_CBC,
    },
    {    /* 3DES CBC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
    },
    {    /* DES DOCSIS BPI */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
    },
    {    /* AES CCM */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .algo = RTE_CRYPTO_AEAD_AES_CCM,
    },
    {    /* AES CMAC */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_AES_CMAC,
    },
    {    /* AES GCM */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .algo = RTE_CRYPTO_AEAD_AES_GCM,
    },
    {    /* AES GMAC (AUTH) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_AES_GMAC,
    },
#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
    {    /* AES ECB */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_AES_ECB,
    },
#endif
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
    {    /* ZUC (EIA3) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
    },
    {    /* ZUC (EEA3) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
    },
    {    /* SNOW 3G (UIA2) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
    },
    {    /* SNOW 3G (UEA2) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
    },
    {    /* KASUMI (F9) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
    },
    {    /* KASUMI (F8) */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
    },
#endif
    RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

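/*
 * DOCSIS lookaside protocol support: compiled in only when the driver is
 * built with AESNI_MB_DOCSIS_SEC_ENABLED, in which case the PMD also
 * registers the capabilities below with the rte_security API.
 */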
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
static const struct rte_cryptodev_capabilities
                    aesni_mb_pmd_security_crypto_cap[] = {
    {    /* AES DOCSIS BPI */
        .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
    },
    RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability aesni_mb_pmd_security_cap[] = {
    {    /* DOCSIS Uplink */
        .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
        .protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
        .docsis = {
            .direction = RTE_SECURITY_DOCSIS_UPLINK
        },
        .crypto_capabilities = aesni_mb_pmd_security_crypto_cap
    },
    {    /* DOCSIS Downlink */
        .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
        .protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
        .docsis = {
            .direction = RTE_SECURITY_DOCSIS_DOWNLINK
        },
        .crypto_capabilities = aesni_mb_pmd_security_crypto_cap
    },
    {    /* End-of-list marker */
        .action = RTE_SECURITY_ACTION_TYPE_NONE
    }
};
#endif

/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
        __rte_unused struct rte_cryptodev_config *config)
{
    return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
    return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
    return 0;
}

/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_stats *stats)
{
    int qp_id;

    for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
        struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

        stats->enqueued_count += qp->stats.enqueued_count;
        stats->dequeued_count += qp->stats.dequeued_count;

        stats->enqueue_err_count += qp->stats.enqueue_err_count;
        stats->dequeue_err_count += qp->stats.dequeue_err_count;
    }
}

/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
    int qp_id;

    for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
        struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

        memset(&qp->stats, 0, sizeof(qp->stats));
    }
}

/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_info *dev_info)
{
    struct aesni_mb_private *internals = dev->data->dev_private;

    if (dev_info != NULL) {
        dev_info->driver_id = dev->driver_id;
        dev_info->feature_flags = dev->feature_flags;
        dev_info->capabilities = aesni_mb_pmd_capabilities;
        dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
        /* No limit on the number of sessions */
        dev_info->sym.max_nb_sessions = 0;
    }
}

/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
    struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
    struct rte_ring *r = NULL;

    if (qp != NULL) {
        r = rte_ring_lookup(qp->name);
        if (r)
            rte_ring_free(r);
        if (qp->mb_mgr)
            free_mb_mgr(qp->mb_mgr);
        rte_free(qp);
        dev->data->queue_pairs[qp_id] = NULL;
    }
    return 0;
}

/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
        struct aesni_mb_qp *qp)
{
    unsigned n = snprintf(qp->name, sizeof(qp->name),
            "aesni_mb_pmd_%u_qp_%u",
            dev->data->dev_id, qp->id);

    if (n >= sizeof(qp->name))
        return -1;

    return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
        unsigned int ring_size, int socket_id)
{
    struct rte_ring *r;
    char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

    unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));

    if (n >= sizeof(ring_name))
        return NULL;

    r = rte_ring_lookup(ring_name);
    if (r) {
        if (rte_ring_get_size(r) >= ring_size) {
            AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
                    ring_name);
            return r;
        }

        AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
                ring_name);
        return NULL;
    }

    /* Single-producer/single-consumer: a qp is only ever used by one lcore */
    return rte_ring_create(ring_name, ring_size, socket_id,
            RING_F_SP_ENQ | RING_F_SC_DEQ);
}

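/*
 * Each queue pair allocates its own MB_MGR instance below, so every qp keeps
 * private multi-buffer job state and separate queue pairs can be polled from
 * different lcores without contending on a shared manager.
 */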
/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id)
{
    struct aesni_mb_qp *qp = NULL;
    struct aesni_mb_private *internals = dev->data->dev_private;
    int ret = -1;

    /* Free memory prior to re-allocation if needed. */
    if (dev->data->queue_pairs[qp_id] != NULL)
        aesni_mb_pmd_qp_release(dev, qp_id);

    /* Allocate the queue pair data structure. */
    qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
            RTE_CACHE_LINE_SIZE, socket_id);
    if (qp == NULL)
        return -ENOMEM;

    qp->id = qp_id;
    dev->data->queue_pairs[qp_id] = qp;

    if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
        goto qp_setup_cleanup;

    qp->mb_mgr = alloc_mb_mgr(0);
    if (qp->mb_mgr == NULL) {
        ret = -ENOMEM;
        goto qp_setup_cleanup;
    }

    switch (internals->vector_mode) {
    case RTE_AESNI_MB_SSE:
        dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
        init_mb_mgr_sse(qp->mb_mgr);
        break;
    case RTE_AESNI_MB_AVX:
        dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
        init_mb_mgr_avx(qp->mb_mgr);
        break;
    case RTE_AESNI_MB_AVX2:
        dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
        init_mb_mgr_avx2(qp->mb_mgr);
        break;
    case RTE_AESNI_MB_AVX512:
        dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
        init_mb_mgr_avx512(qp->mb_mgr);
        break;
    default:
        AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
                internals->vector_mode);
        goto qp_setup_cleanup;
    }

    qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
            qp_conf->nb_descriptors, socket_id);
    if (qp->ingress_queue == NULL) {
        ret = -1;
        goto qp_setup_cleanup;
    }

    qp->sess_mp = qp_conf->mp_session;
    qp->sess_mp_priv = qp_conf->mp_session_private;

    memset(&qp->stats, 0, sizeof(qp->stats));

    char mp_name[RTE_MEMPOOL_NAMESIZE];

    snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
            "digest_mp_%u_%u", dev->data->dev_id, qp_id);
    return 0;

qp_setup_cleanup:
    if (qp) {
        if (qp->mb_mgr)
            free_mb_mgr(qp->mb_mgr);
        rte_free(qp);
    }

    return ret;
}

/** Returns the size of the aesni multi-buffer session structure */
static unsigned
aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
    return sizeof(struct aesni_mb_session);
}

/** Configure an aesni multi-buffer session from a crypto xform chain */
static int
aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool)
{
    void *sess_private_data;
    struct aesni_mb_private *internals = dev->data->dev_private;
    int ret;

    if (unlikely(sess == NULL)) {
        AESNI_MB_LOG(ERR, "invalid session struct");
        return -EINVAL;
    }

    if (rte_mempool_get(mempool, &sess_private_data)) {
        AESNI_MB_LOG(ERR,
                "Couldn't get object from session mempool");
        return -ENOMEM;
    }

    ret = aesni_mb_set_session_parameters(internals->mb_mgr,
            sess_private_data, xform);
    if (ret != 0) {
        AESNI_MB_LOG(ERR, "failed configure session parameters");

        /* Return session to mempool */
        rte_mempool_put(mempool, sess_private_data);
        return ret;
    }

    set_sym_session_private_data(sess, dev->driver_id,
            sess_private_data);

    return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess)
{
    uint8_t index = dev->driver_id;
    void *sess_priv = get_sym_session_private_data(sess, index);

    /* Zero out the whole structure */
    if (sess_priv) {
        memset(sess_priv, 0, sizeof(struct aesni_mb_session));
        struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
        set_sym_session_private_data(sess, index, NULL);
        rte_mempool_put(sess_mp, sess_priv);
    }
}

struct rte_cryptodev_ops aesni_mb_pmd_ops = {
    .dev_configure = aesni_mb_pmd_config,
    .dev_start = aesni_mb_pmd_start,
    .dev_stop = aesni_mb_pmd_stop,
    .dev_close = aesni_mb_pmd_close,

    .stats_get = aesni_mb_pmd_stats_get,
    .stats_reset = aesni_mb_pmd_stats_reset,

    .dev_infos_get = aesni_mb_pmd_info_get,

    .queue_pair_setup = aesni_mb_pmd_qp_setup,
    .queue_pair_release = aesni_mb_pmd_qp_release,

    .sym_cpu_process = aesni_mb_cpu_crypto_process_bulk,

    .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
    .sym_session_configure = aesni_mb_pmd_sym_session_configure,
    .sym_session_clear = aesni_mb_pmd_sym_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;

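/*
 * This ops table is installed on the device from the driver's create/probe
 * path (dev->dev_ops = rte_aesni_mb_pmd_ops, in the companion
 * rte_aesni_mb_pmd.c).
 */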
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
/**
 * Configure an aesni multi-buffer session from a security session
 * configuration
 */
static int
aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
        struct rte_security_session *sess,
        struct rte_mempool *mempool)
{
    void *sess_private_data;
    struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
    int ret;

    if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
            conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
        AESNI_MB_LOG(ERR, "Invalid security protocol");
        return -EINVAL;
    }

    if (rte_mempool_get(mempool, &sess_private_data)) {
        AESNI_MB_LOG(ERR, "Couldn't get object from session mempool");
        return -ENOMEM;
    }

    ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
            sess_private_data);
    if (ret != 0) {
        AESNI_MB_LOG(ERR, "Failed to configure session parameters");

        /* Return session to mempool */
        rte_mempool_put(mempool, sess_private_data);
        return ret;
    }

    set_sec_session_private_data(sess, sess_private_data);

    return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
        struct rte_security_session *sess)
{
    void *sess_priv = get_sec_session_private_data(sess);

    if (sess_priv) {
        struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

        memset(sess_priv, 0, sizeof(struct aesni_mb_session));
        set_sec_session_private_data(sess, NULL);
        rte_mempool_put(sess_mp, sess_priv);
    }
    return 0;
}

/** Get security capabilities for aesni multi-buffer */
static const struct rte_security_capability *
aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
{
    return aesni_mb_pmd_security_cap;
}

static struct rte_security_ops aesni_mb_pmd_sec_ops = {
    .session_create = aesni_mb_pmd_sec_sess_create,
    .session_update = NULL,
    .session_stats_get = NULL,
    .session_destroy = aesni_mb_pmd_sec_sess_destroy,
    .set_pkt_metadata = NULL,
    .capabilities_get = aesni_mb_pmd_sec_capa_get
};

struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;

#endif