/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_MD5_HMAC, }, } }, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, }, } }, }
	},
	{	/* SHA1 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA1, }, } }, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, }, } }, }
	},
	{	/* SHA224 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA224, }, } }, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, }, } }, }
	},
	{	/* SHA256 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA256, }, } }, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, }, } }, }
	},
	{	/* SHA384 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA384, }, } }, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, }, } }, }
	},
	{	/* SHA512 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA512, }, } }, }
	},
	{	/* AES XCBC HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, }, } }, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC, }, } }, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CTR, }, } }, }
	},
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI, }, } }, }
	},
	{	/* DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_DES_CBC, }, } }, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_3DES_CBC, }, } }, }
	},
	{	/* DES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI, }, } }, }
	},
	{	/* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = { .algo = RTE_CRYPTO_AEAD_AES_CCM, }, } }, }
	},
	{	/* AES CMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_CMAC, }, } }, }
	},
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = { .algo = RTE_CRYPTO_AEAD_AES_GCM, }, } }, }
	},
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_GMAC, }, } }, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
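/*
 * The entries above omit the block/key/digest/IV size-range fields that a
 * complete capability carries. As an illustrative sketch only (the range
 * values below are assumptions, not taken from this driver), a fully
 * populated AUTH entry has this shape:
 *
 *	{	// SHA1 HMAC
 *		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *		{.sym = {
 *			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *			{.auth = {
 *				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *				.block_size = 64,
 *				.key_size = { .min = 1, .max = 65535, .increment = 1 },
 *				.digest_size = { .min = 12, .max = 20, .increment = 4 },
 *				.iv_size = { 0 }
 *			}, }
 *		}, }
 *	},
 */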
/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}
/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}
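/*
 * Usage sketch (assumed application-side code, not part of this driver):
 * the info and capability table filled in above are consumed through the
 * public cryptodev API, for example:
 *
 *	struct rte_cryptodev_info info;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *	};
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (rte_cryptodev_sym_capability_get(dev_id, &idx) != NULL)
 *		;	// SHA1-HMAC is supported by this device
 */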
/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}
/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s", qp->name);

	if (n >= sizeof(ring_name))
		return NULL;

	/* Reuse an existing ring of sufficient size rather than leaking it. */
	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
					ring_name);
			return r;
		}

		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
				ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret = -1;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->mb_mgr = alloc_mb_mgr(0);
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

	/* Initialise the multi-buffer manager for the detected vector ISA. */
	switch (internals->vector_mode) {
	case RTE_AESNI_MB_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(qp->mb_mgr);
		break;
	case RTE_AESNI_MB_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(qp->mb_mgr);
		break;
	case RTE_AESNI_MB_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		init_mb_mgr_avx2(qp->mb_mgr);
		break;
	case RTE_AESNI_MB_AVX512:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		init_mb_mgr_avx512(qp->mb_mgr);
		break;
	default:
		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
				internals->vector_mode);
		goto qp_setup_cleanup;
	}

	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -1;
		goto qp_setup_cleanup;
	}

	qp->sess_mp = session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	char mp_name[RTE_MEMPOOL_NAMESIZE];

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"digest_mp_%u_%u", dev->data->dev_id, qp_id);
	return 0;

qp_setup_cleanup:
	if (qp) {
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
		rte_free(qp);
	}

	return ret;
}
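/*
 * Usage sketch (assumed application-side code, not part of this driver):
 * the queue pair setup above is reached through the public cryptodev API,
 * for example:
 *
 *	struct rte_cryptodev_config conf = {
 *		.nb_queue_pairs = 1,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id(), session_pool);
 */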
/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Returns the size of the aesni multi-buffer session structure */
static unsigned
aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}
/** Configure an aesni multi-buffer session from a crypto xform chain */
static int
aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	if (unlikely(sess == NULL)) {
		AESNI_MB_LOG(ERR, "invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		AESNI_MB_LOG(ERR,
				"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
			sess_private_data, xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
/** Clear the session memory so it doesn't leave key material behind */
static void
aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
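/*
 * Usage sketch (assumed application-side code, not part of this driver):
 * the configure/clear pair above backs the public session lifecycle:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(session_pool);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, session_priv_pool);
 *	// ... enqueue/dequeue crypto ops referencing sess ...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */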
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,
		.queue_pair_count	= aesni_mb_pmd_qp_count,

		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;