*/
#include <sys/queue.h>
-
+#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
return 0;
}
+
+static uint16_t
+dummy_crypto_enqueue_burst(__rte_unused void *qp,
+ __rte_unused struct rte_crypto_op **ops,
+ __rte_unused uint16_t nb_ops)
+{
+ CDEV_LOG_ERR(
+ "crypto enqueue burst requested for unconfigured device");
+ rte_errno = ENOTSUP;
+ return 0;
+}
+
+static uint16_t
+dummy_crypto_dequeue_burst(__rte_unused void *qp,
+ __rte_unused struct rte_crypto_op **ops,
+ __rte_unused uint16_t nb_ops)
+{
+ CDEV_LOG_ERR(
+ "crypto dequeue burst requested for unconfigured device");
+ rte_errno = ENOTSUP;
+ return 0;
+}
+
+void
+cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
+{
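+	/*
+	 * The dummy arrays are static so that the pointers copied into
+	 * *fp_ops stay valid after this call, keeping qp.data[] and the
+	 * callback arrays safely dereferenceable for unconfigured devices.
+	 */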
+ static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
+ static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ static const struct rte_crypto_fp_ops dummy = {
+ .enqueue_burst = dummy_crypto_enqueue_burst,
+ .dequeue_burst = dummy_crypto_dequeue_burst,
+ .qp = {
+ .data = dummy_data,
+ .enq_cb = dummy_cb,
+ .deq_cb = dummy_cb,
+ },
+ };
+
+ *fp_ops = dummy;
+}
+
+void
+cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
+ const struct rte_cryptodev *dev)
+{
+ fp_ops->enqueue_burst = dev->enqueue_burst;
+ fp_ops->dequeue_burst = dev->dequeue_burst;
+ fp_ops->qp.data = dev->data->queue_pairs;
+ fp_ops->qp.enq_cb = dev->enq_cbs;
+ fp_ops->qp.deq_cb = dev->deq_cbs;
+}
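Taken together, the two helpers define a simple contract: after cryptodev_fp_ops_reset() the burst pointers remain callable but move no ops and report ENOTSUP through rte_errno, while cryptodev_fp_ops_set() rewires them to the PMD. A minimal sketch of that contract (hypothetical test code, not part of this patch):

/* Hypothetical illustration: exercise the dummy ops installed by
 * cryptodev_fp_ops_reset(). */
static void
check_fp_ops_reset_contract(void)
{
	struct rte_crypto_fp_ops ops;
	struct rte_crypto_op *burst[1] = { NULL };

	cryptodev_fp_ops_reset(&ops);

	rte_errno = 0;
	/* The dummy burst functions ignore their arguments, enqueue
	 * nothing and flag the unconfigured device via rte_errno. */
	if (ops.enqueue_burst(ops.qp.data[0], burst, 1) != 0 ||
			rte_errno != ENOTSUP)
		CDEV_LOG_ERR("unexpected dummy fp_ops behaviour");
}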
driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv));\
}
+/* Reset crypto device fast-path APIs to dummy values. */
+__rte_internal
+void
+cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops);
+
+/* Set up crypto device fast-path APIs. */
+__rte_internal
+void
+cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
+ const struct rte_cryptodev *dev);
+
static inline void *
get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
uint8_t driver_id) {
.nb_devs = 0
};
+/* Public fast-path APIs. */
+struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
+
/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
dev_id = cryptodev->data->dev_id;
+ cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
+
/* Close device only if device operations have been set */
if (cryptodev->dev_ops) {
ret = rte_cryptodev_close(dev_id);
}
diag = (*dev->dev_ops->dev_start)(dev);
+ /* expose selection of PMD fast-path functions */
+ cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
+
rte_cryptodev_trace_start(dev_id, diag);
if (diag == 0)
dev->data->dev_started = 1;
return;
}
+ /* point fast-path functions to dummy ones */
+ cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
+
(*dev->dev_ops->dev_stop)(dev);
rte_cryptodev_trace_stop(dev_id);
dev->data->dev_started = 0;
return nb_drivers++;
}
+
+RTE_INIT(cryptodev_init_fp_ops)
+{
+ uint32_t i;
+
+ for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
+ cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
+}
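Because RTE_INIT constructors run when the library is loaded, every slot of rte_crypto_fp_ops holds the dummy ops before any device is probed or configured. A hedged sketch of what a caller would then observe on an unconfigured device (it assumes the public inline burst wrappers dispatch through rte_crypto_fp_ops, as this series arranges; enqueue_or_warn is a hypothetical helper):

#include <stdio.h>

#include <rte_cryptodev.h>
#include <rte_errno.h>

/* Hypothetical helper: an enqueue on an unconfigured device is
 * harmless; the dummy op moves nothing and sets rte_errno. */
static uint16_t
enqueue_or_warn(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent;

	rte_errno = 0;
	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	if (sent == 0 && rte_errno == ENOTSUP)
		printf("crypto device %u is not started\n", dev_id);
	return sent;
}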
struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
+/**
+ * @internal
+ * Structure used to hold opaque pointers to internal cryptodev
+ * queue pair data.
+ * The main purpose of exposing these pointers is to allow the
+ * compiler to fetch this data in advance for the fast-path
+ * cryptodev inline functions.
+ */
+struct rte_cryptodev_qpdata {
+ /** points to array of internal queue pair data pointers. */
+ void **data;
+	/** points to array of enqueue callback data pointers. */
+	struct rte_cryptodev_cb_rcu *enq_cb;
+	/** points to array of dequeue callback data pointers. */
+	struct rte_cryptodev_cb_rcu *deq_cb;
+};
+
+struct rte_crypto_fp_ops {
+ /** PMD enqueue burst function. */
+ enqueue_pkt_burst_t enqueue_burst;
+ /** PMD dequeue burst function. */
+ dequeue_pkt_burst_t dequeue_burst;
+ /** Internal queue pair data pointers. */
+ struct rte_cryptodev_qpdata qp;
+ /** Reserved for future ops. */
+ uintptr_t reserved[3];
+} __rte_cache_aligned;
+
+extern struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
+
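For context, a simplified sketch of how a fast-path inline wrapper can dispatch through this table; error checking, tracing and the enq_cb/deq_cb callback handling are omitted, and the real wrappers in this series may differ in detail:

static inline uint16_t
sketch_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops =
		&rte_crypto_fp_ops[dev_id];
	/* A load from the cache-aligned table yields the opaque queue
	 * pair pointer; the PMD burst function does the rest. No access
	 * to struct rte_cryptodev is needed on the fast path. */
	void *qp = fp_ops->qp.data[qp_id];

	return fp_ops->enqueue_burst(qp, ops, nb_ops);
}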
/**
* @internal
* The data part, with no function pointers, associated with each device.
rte_crypto_auth_operation_strings;
rte_crypto_cipher_algorithm_strings;
rte_crypto_cipher_operation_strings;
+ rte_crypto_fp_ops;
rte_crypto_op_pool_create;
rte_cryptodev_callback_register;
rte_cryptodev_callback_unregister;
INTERNAL {
global:
+ cryptodev_fp_ops_reset;
+ cryptodev_fp_ops_set;
rte_cryptodev_allocate_driver;
rte_cryptodev_pmd_allocate;
rte_cryptodev_pmd_callback_process;