/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"
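
/*
 * Symmetric crypto capabilities advertised to applications through the
 * dev_infos_get callback below. Some of the size ranges in the HMAC entries
 * depend on the intel-ipsec-mb version (IMB_VERSION_NUM) the PMD is built
 * against.
 */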
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
	{	/* AES XCBC HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
	{	/* DES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
				.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}
/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}
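
/*
 * Usage sketch (illustrative only, not part of this driver): an application
 * reaches the callback above through the generic cryptodev API, e.g.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	// info.capabilities now points at aesni_mb_pmd_capabilities
 */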
/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);
		free_mb_mgr(qp->mb_mgr);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}
/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s", qp->name);

	if (n >= sizeof(ring_name))
		return NULL;

	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
					ring_name);
			return r;
		}

		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
				ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
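
/*
 * The ring above is created single-producer/single-consumer
 * (RING_F_SP_ENQ | RING_F_SC_DEQ): a queue pair is expected to be driven
 * from one lcore at a time, as is usual for cryptodev queue pairs.
 */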
/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret = -1;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->mb_mgr = alloc_mb_mgr(0);
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

	qp->op_fns = &job_ops[internals->vector_mode];

	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -1;
		goto qp_setup_cleanup;
	}

	qp->sess_mp = session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	char mp_name[RTE_MEMPOOL_NAMESIZE];

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"digest_mp_%u_%u", dev->data->dev_id, qp_id);

	/* Initialise multi-buffer manager */
	(*qp->op_fns->job.init_mgr)(qp->mb_mgr);

	return 0;

qp_setup_cleanup:
	if (qp) {
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
		rte_free(qp);
	}

	return ret;
}
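
/*
 * Usage sketch (illustrative only): the callback above is invoked through the
 * generic queue pair setup API, with a configuration such as
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id(),
 *			session_mempool);
 *
 * dev_id, the descriptor count and session_mempool are application-side
 * choices, not defined in this file.
 */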
/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Returns the size of the aesni multi-buffer session structure */
static unsigned
aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}
/** Configure an AESNI multi-buffer session from a crypto xform chain */
static int
aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	if (unlikely(sess == NULL)) {
		AESNI_MB_LOG(ERR, "invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		AESNI_MB_LOG(ERR,
				"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
			sess_private_data, xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
/** Clear the memory of the session so it doesn't leave key material behind */
static void
aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,
		.queue_pair_count	= aesni_mb_pmd_qp_count,

		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
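
/*
 * Usage sketch (illustrative only): the exported pointer above is hooked up
 * during device creation in the companion rte_aesni_mb_pmd.c, roughly as
 *
 *	dev->dev_ops = rte_aesni_mb_pmd_ops;
 *
 * after which the cryptodev framework dispatches configure/start/stop,
 * queue pair and session calls to the callbacks defined in this file.
 */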