cryptodev: move inline APIs into separate structure
authorAkhil Goyal <gakhil@marvell.com>
Wed, 20 Oct 2021 11:27:49 +0000 (16:57 +0530)
committerAkhil Goyal <gakhil@marvell.com>
Wed, 20 Oct 2021 13:33:16 +0000 (15:33 +0200)
Move fastpath inline function pointers from rte_cryptodev into a
separate structure accessed via a flat array.
The intention is to make rte_cryptodev and related structures private
to avoid future API/ABI breakages.

Signed-off-by: Akhil Goyal <gakhil@marvell.com>
Tested-by: Rebecca Troy <rebecca.troy@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
lib/cryptodev/cryptodev_pmd.c
lib/cryptodev/cryptodev_pmd.h
lib/cryptodev/rte_cryptodev.c
lib/cryptodev/rte_cryptodev_core.h
lib/cryptodev/version.map

index 44a70ec..fd74543 100644 (file)
@@ -3,7 +3,7 @@
  */
 
 #include <sys/queue.h>
-
+#include <rte_errno.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
 
@@ -160,3 +160,54 @@ rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
 
        return 0;
 }
+
+/*
+ * Placeholder enqueue handler installed while a device is not started
+ * or not configured: logs an error, sets rte_errno to ENOTSUP and
+ * reports zero ops enqueued.
+ */
+static uint16_t
+dummy_crypto_enqueue_burst(__rte_unused void *qp,
+                          __rte_unused struct rte_crypto_op **ops,
+                          __rte_unused uint16_t nb_ops)
+{
+       CDEV_LOG_ERR(
+               "crypto enqueue burst requested for unconfigured device");
+       rte_errno = ENOTSUP;
+       return 0;
+}
+
+/*
+ * Placeholder dequeue handler installed while a device is not started
+ * or not configured: logs an error, sets rte_errno to ENOTSUP and
+ * reports zero ops dequeued.
+ */
+static uint16_t
+dummy_crypto_dequeue_burst(__rte_unused void *qp,
+                          __rte_unused struct rte_crypto_op **ops,
+                          __rte_unused uint16_t nb_ops)
+{
+       CDEV_LOG_ERR(
+               "crypto dequeue burst requested for unconfigured device");
+       rte_errno = ENOTSUP;
+       return 0;
+}
+
+/*
+ * Reset one fast-path ops entry to the safe dummy handlers.
+ *
+ * The dummy_cb/dummy_data arrays are 'static' on purpose: the qp
+ * pointers stored into *fp_ops must remain valid after this function
+ * returns, for as long as the entry stays in the reset state.
+ */
+void
+cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
+{
+       static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
+       static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+       static const struct rte_crypto_fp_ops dummy = {
+               .enqueue_burst = dummy_crypto_enqueue_burst,
+               .dequeue_burst = dummy_crypto_dequeue_burst,
+               .qp = {
+                       .data = dummy_data,
+                       .enq_cb = dummy_cb,
+                       .deq_cb = dummy_cb,
+               },
+       };
+
+       /* struct copy: installs dummy burst fns and dummy qp pointers */
+       *fp_ops = dummy;
+}
+
+/*
+ * Populate one fast-path ops entry from a device: the PMD burst
+ * functions plus the queue-pair data and enqueue/dequeue callback
+ * arrays that the inline fast-path wrappers read.
+ */
+void
+cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
+                    const struct rte_cryptodev *dev)
+{
+       fp_ops->enqueue_burst = dev->enqueue_burst;
+       fp_ops->dequeue_burst = dev->dequeue_burst;
+       fp_ops->qp.data = dev->data->queue_pairs;
+       fp_ops->qp.enq_cb = dev->enq_cbs;
+       fp_ops->qp.deq_cb = dev->deq_cbs;
+}
index 36606dd..a71edbb 100644 (file)
@@ -516,6 +516,17 @@ RTE_INIT(init_ ##driver_id)\
        driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv));\
 }
 
+/* Reset crypto device fastpath APIs to dummy values. */
+__rte_internal
+void
+cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops);
+
+/* Setup crypto device fastpath APIs. */
+__rte_internal
+void
+cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
+                    const struct rte_cryptodev *dev);
+
 static inline void *
 get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
                uint8_t driver_id) {
index eb86e62..305e013 100644 (file)
@@ -53,6 +53,9 @@ static struct rte_cryptodev_global cryptodev_globals = {
                .nb_devs                = 0
 };
 
+/* Public fastpath APIs. */
+struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
+
 /* spinlock for crypto device callbacks */
 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
 
@@ -917,6 +920,8 @@ rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
 
        dev_id = cryptodev->data->dev_id;
 
+       cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
+
        /* Close device only if device operations have been set */
        if (cryptodev->dev_ops) {
                ret = rte_cryptodev_close(dev_id);
@@ -1080,6 +1085,9 @@ rte_cryptodev_start(uint8_t dev_id)
        }
 
        diag = (*dev->dev_ops->dev_start)(dev);
+       /* expose selection of PMD fast-path functions */
+       cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
+
        rte_cryptodev_trace_start(dev_id, diag);
        if (diag == 0)
                dev->data->dev_started = 1;
@@ -1109,6 +1117,9 @@ rte_cryptodev_stop(uint8_t dev_id)
                return;
        }
 
+       /* point fast-path functions to dummy ones */
+       cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
+
        (*dev->dev_ops->dev_stop)(dev);
        rte_cryptodev_trace_stop(dev_id);
        dev->data->dev_started = 0;
@@ -2411,3 +2422,11 @@ rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
 
        return nb_drivers++;
 }
+
+RTE_INIT(cryptodev_init_fp_ops)
+{
+       uint32_t i;
+
+       for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
+               cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
+}
index 1633e55..2bb9a22 100644 (file)
@@ -25,6 +25,35 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
                struct rte_crypto_op **ops,     uint16_t nb_ops);
 /**< Enqueue packets for processing on queue pair of a device. */
 
+/**
+ * @internal
+ * Structure used to hold opaque pointers to internal cryptodev
+ * queue pair data.
+ * The main purpose to expose these pointers at all - allow compiler
+ * to fetch this data for fast-path cryptodev inline functions in advance.
+ */
+struct rte_cryptodev_qpdata {
+       /** points to array of internal queue pair data pointers. */
+       void **data;
+       /** points to array of enqueue callback data pointers */
+       struct rte_cryptodev_cb_rcu *enq_cb;
+       /** points to array of dequeue callback data pointers */
+       struct rte_cryptodev_cb_rcu *deq_cb;
+};
+
+/**
+ * @internal
+ * Per-device fast-path function pointers and queue-pair data.
+ * Entries live in the flat rte_crypto_fp_ops[] array, indexed by
+ * device id; __rte_cache_aligned keeps each entry on its own cache
+ * line boundary (presumably to avoid false sharing between devices).
+ */
+struct rte_crypto_fp_ops {
+       /** PMD enqueue burst function. */
+       enqueue_pkt_burst_t enqueue_burst;
+       /** PMD dequeue burst function. */
+       dequeue_pkt_burst_t dequeue_burst;
+       /** Internal queue pair data pointers. */
+       struct rte_cryptodev_qpdata qp;
+       /** Reserved for future ops. */
+       uintptr_t reserved[3];
+} __rte_cache_aligned;
+
+/* Flat array of fast-path ops, one entry per possible crypto device. */
+extern struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
+
 /**
  * @internal
  * The data part, with no function pointers, associated with each device.
index 43cf937..0e409e7 100644 (file)
@@ -7,6 +7,7 @@ DPDK_22 {
        rte_crypto_auth_operation_strings;
        rte_crypto_cipher_algorithm_strings;
        rte_crypto_cipher_operation_strings;
+       rte_crypto_fp_ops;
        rte_crypto_op_pool_create;
        rte_cryptodev_callback_register;
        rte_cryptodev_callback_unregister;
@@ -109,6 +110,8 @@ EXPERIMENTAL {
 INTERNAL {
        global:
 
+       cryptodev_fp_ops_reset;
+       cryptodev_fp_ops_set;
        rte_cryptodev_allocate_driver;
        rte_cryptodev_pmd_allocate;
        rte_cryptodev_pmd_callback_process;