/* cryptodev defines */
#define RTE_CRYPTO_MAX_DEVS 64
#define RTE_CRYPTODEV_NAME_LEN 64
+#define RTE_CRYPTO_CALLBACKS 1
/* compressdev defines */
#define RTE_COMPRESS_MAX_DEVS 64
; Explicit ignore for driver-only ABI
[suppress_type]
name = eth_dev_ops
+
+; Ignore fields inserted in cacheline boundary of rte_cryptodev
+[suppress_type]
+ name = rte_cryptodev
+ has_data_member_inserted_between = {0, 1023}
rte_crypto_op including other crypto information such as the IVs (since there can
be an IV also for authentication).
+User callback APIs
+~~~~~~~~~~~~~~~~~~
+The add APIs configure a user callback function to be called for each burst of
+crypto ops received/sent on a given crypto device queue pair. The return value
+is a pointer that can be used later to remove the callback using the remove
+API. The application is expected to register a callback function of type
+``rte_cryptodev_callback_fn``. Multiple callback functions can be added for a
+given queue pair, and they are called in the order they were added. The API
+does not limit the number of callbacks.
+
+Callbacks registered by the application do not survive ``rte_cryptodev_configure``,
+as it reinitializes the callback list. It is the user's responsibility to remove
+all installed callbacks before calling ``rte_cryptodev_configure`` to avoid
+possible memory leaks.
+
+Hence, the application is expected to add user callbacks only after calling
+``rte_cryptodev_configure``. Callbacks can also be added at runtime. These
+callbacks are executed whenever ``rte_cryptodev_enqueue_burst``/
+``rte_cryptodev_dequeue_burst`` is called; see the sketch after the API
+listing below.
+
+.. code-block:: c
+
+ struct rte_cryptodev_cb *
+ rte_cryptodev_add_enq_callback(uint8_t dev_id, uint16_t qp_id,
+ rte_cryptodev_callback_fn cb_fn,
+ void *cb_arg);
+
+ struct rte_cryptodev_cb *
+ rte_cryptodev_add_deq_callback(uint8_t dev_id, uint16_t qp_id,
+ rte_cryptodev_callback_fn cb_fn,
+ void *cb_arg);
+
+   typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
+                                                 struct rte_crypto_op **ops,
+                                                 uint16_t nb_ops, void *user_param);
+
+The remove APIs remove a callback function added by
+``rte_cryptodev_add_enq_callback``/``rte_cryptodev_add_deq_callback``.
+
+.. code-block:: c
+
+ int rte_cryptodev_remove_enq_callback(uint8_t dev_id, uint16_t qp_id,
+ struct rte_cryptodev_cb *cb);
+
+ int rte_cryptodev_remove_deq_callback(uint8_t dev_id, uint16_t qp_id,
+ struct rte_cryptodev_cb *cb);
+
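+As an illustration, below is a minimal sketch of adding and removing a dequeue
+callback that counts processed crypto ops. The ``demo_*`` names are
+hypothetical application code, not part of the API.
+
+.. code-block:: c
+
+    static uint64_t demo_deq_count;
+
+    /* Hypothetical application callback: counts every dequeued op. */
+    static uint16_t
+    demo_count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
+                  uint16_t nb_ops, void *user_param)
+    {
+        RTE_SET_USED(dev_id);
+        RTE_SET_USED(qp_id);
+        RTE_SET_USED(ops);
+        *(uint64_t *)user_param += nb_ops;
+        /* Pass the burst on unchanged. */
+        return nb_ops;
+    }
+
+    /* After rte_cryptodev_configure() and queue pair setup: */
+    struct rte_cryptodev_cb *cb;
+
+    cb = rte_cryptodev_add_deq_callback(dev_id, qp_id, demo_count_cb,
+                                        &demo_deq_count);
+    if (cb == NULL)
+        rte_panic("failed to add callback, rte_errno=%d\n", rte_errno);
+
+    /* ... enqueue/dequeue bursts; the callback runs on each dequeue ... */
+
+    /* Remove before the next rte_cryptodev_configure() call. */
+    rte_cryptodev_remove_deq_callback(dev_id, qp_id, cb);
+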
Enqueue / Dequeue Burst APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Added inner UDP/IPv4 support for VXLAN IPv4 GSO.
+* **Added enqueue & dequeue callback APIs for cryptodev library.**
+
+  The cryptodev library now has enqueue & dequeue callback APIs that
+  enable applications to add/remove user callbacks which get called
+  for every enqueue/dequeue operation.
+
* **Updated the OCTEON TX2 crypto PMD.**
* Updated the OCTEON TX2 crypto PMD lookaside protocol offload for IPsec with
Also, make sure to start the actual text at the margin.
=======================================================
+* cryptodev: The structure ``rte_cryptodev`` has been updated with pointers
+ for adding enqueue and dequeue callbacks.
+
ABI Changes
-----------
'rte_crypto.h',
'rte_crypto_sym.h',
'rte_crypto_asym.h')
-deps += ['kvargs', 'mbuf']
+deps += ['kvargs', 'mbuf', 'rcu']
return 0;
}
+/* Spinlock for crypto device enq/deq callbacks */
+static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+cryptodev_cb_cleanup(struct rte_cryptodev *dev)
+{
+ struct rte_cryptodev_cb_rcu *list;
+ struct rte_cryptodev_cb *cb, *next;
+ uint16_t qp_id;
+
+ if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
+ return;
+
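+	/* Free the enqueue callback chain and QSBR variable of each queue pair. */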
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ list = &dev->enq_cbs[qp_id];
+ cb = list->next;
+ while (cb != NULL) {
+ next = cb->next;
+ rte_free(cb);
+ cb = next;
+ }
+
+ rte_free(list->qsbr);
+ }
+
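+	/* Likewise, free the dequeue callback chains and QSBR variables. */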
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ list = &dev->deq_cbs[qp_id];
+ cb = list->next;
+ while (cb != NULL) {
+ next = cb->next;
+ rte_free(cb);
+ cb = next;
+ }
+
+ rte_free(list->qsbr);
+ }
+
+ rte_free(dev->enq_cbs);
+ dev->enq_cbs = NULL;
+ rte_free(dev->deq_cbs);
+ dev->deq_cbs = NULL;
+}
+
+static int
+cryptodev_cb_init(struct rte_cryptodev *dev)
+{
+ struct rte_cryptodev_cb_rcu *list;
+ struct rte_rcu_qsbr *qsbr;
+ uint16_t qp_id;
+ size_t size;
+
+	/* Max threads set to 1: one data plane thread accesses a queue pair */
+ const uint32_t max_threads = 1;
+
+ dev->enq_cbs = rte_zmalloc(NULL,
+ sizeof(struct rte_cryptodev_cb_rcu) *
+ dev->data->nb_queue_pairs, 0);
+ if (dev->enq_cbs == NULL) {
+ CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
+ return -ENOMEM;
+ }
+
+ dev->deq_cbs = rte_zmalloc(NULL,
+ sizeof(struct rte_cryptodev_cb_rcu) *
+ dev->data->nb_queue_pairs, 0);
+	if (dev->deq_cbs == NULL) {
+		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
+		rte_free(dev->enq_cbs);
+		dev->enq_cbs = NULL;
+		return -ENOMEM;
+	}
+
+ /* Create RCU QSBR variable */
+ size = rte_rcu_qsbr_get_memsize(max_threads);
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ list = &dev->enq_cbs[qp_id];
+ qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (qsbr == NULL) {
+ CDEV_LOG_ERR("Failed to allocate memory for RCU on "
+ "queue_pair_id=%d", qp_id);
+ goto cb_init_err;
+ }
+
+ if (rte_rcu_qsbr_init(qsbr, max_threads)) {
+ CDEV_LOG_ERR("Failed to initialize for RCU on "
+ "queue_pair_id=%d", qp_id);
+ goto cb_init_err;
+ }
+
+ list->qsbr = qsbr;
+ }
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ list = &dev->deq_cbs[qp_id];
+ qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (qsbr == NULL) {
+ CDEV_LOG_ERR("Failed to allocate memory for RCU on "
+ "queue_pair_id=%d", qp_id);
+ goto cb_init_err;
+ }
+
+ if (rte_rcu_qsbr_init(qsbr, max_threads)) {
+ CDEV_LOG_ERR("Failed to initialize for RCU on "
+ "queue_pair_id=%d", qp_id);
+ goto cb_init_err;
+ }
+
+ list->qsbr = qsbr;
+ }
+
+ return 0;
+
+cb_init_err:
+ cryptodev_cb_cleanup(dev);
+ return -ENOMEM;
+}
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+ rte_spinlock_lock(&rte_cryptodev_callback_lock);
+ cryptodev_cb_cleanup(dev);
+ rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+
/* Setup new number of queue pairs and reconfigure device. */
diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
config->socket_id);
return diag;
}
+ rte_spinlock_lock(&rte_cryptodev_callback_lock);
+ diag = cryptodev_cb_init(dev);
+ rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+ if (diag) {
+ CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
+ return diag;
+ }
+
rte_cryptodev_trace_configure(dev_id, config);
return (*dev->dev_ops->dev_configure)(dev, config);
}
-
int
rte_cryptodev_start(uint8_t dev_id)
{
socket_id);
}
+struct rte_cryptodev_cb *
+rte_cryptodev_add_enq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ rte_cryptodev_callback_fn cb_fn,
+ void *cb_arg)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_cb_rcu *list;
+ struct rte_cryptodev_cb *cb, *tail;
+
+ if (!cb_fn) {
+ CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ if (qp_id >= dev->data->nb_queue_pairs) {
+ CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+
+ cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+ if (cb == NULL) {
+ CDEV_LOG_ERR("Failed to allocate memory for callback on "
+ "dev=%d, queue_pair_id=%d", dev_id, qp_id);
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+	rte_spinlock_lock(&rte_cryptodev_callback_lock);
+
+	/* Callback lists are allocated in rte_cryptodev_configure(). */
+	if (dev->enq_cbs == NULL) {
+		CDEV_LOG_ERR("Callback not initialized");
+		rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+		rte_free(cb);
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	cb->fn = cb_fn;
+	cb->arg = cb_arg;
+
+ /* Add the callbacks in fifo order. */
+ list = &dev->enq_cbs[qp_id];
+ tail = list->next;
+
+ if (tail) {
+ while (tail->next)
+ tail = tail->next;
+		/* Stores to cb->fn and cb->arg should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+	} else {
+		/* Stores to cb->fn and cb->arg should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
+	}
+
+ rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+
+ return cb;
+}
+
+int
+rte_cryptodev_remove_enq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ struct rte_cryptodev_cb *cb)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_cb **prev_cb, *curr_cb;
+ struct rte_cryptodev_cb_rcu *list;
+ int ret;
+
+ ret = -EINVAL;
+
+ if (!cb) {
+ CDEV_LOG_ERR("Callback is NULL");
+ return -EINVAL;
+ }
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ if (qp_id >= dev->data->nb_queue_pairs) {
+ CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
+ return -ENODEV;
+ }
+
+ rte_spinlock_lock(&rte_cryptodev_callback_lock);
+ if (dev->enq_cbs == NULL) {
+ CDEV_LOG_ERR("Callback not initialized");
+ goto cb_err;
+ }
+
+ list = &dev->enq_cbs[qp_id];
+ if (list == NULL) {
+ CDEV_LOG_ERR("Callback list is NULL");
+ goto cb_err;
+ }
+
+ if (list->qsbr == NULL) {
+		CDEV_LOG_ERR("RCU QSBR is NULL");
+ goto cb_err;
+ }
+
+ prev_cb = &list->next;
+ for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
+ curr_cb = *prev_cb;
+ if (curr_cb == cb) {
+ /* Remove the user cb from the callback list. */
+ __atomic_store_n(prev_cb, curr_cb->next,
+ __ATOMIC_RELAXED);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (!ret) {
+ /* Call sync with invalid thread id as this is part of
+ * control plane API
+ */
+ rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
+ rte_free(cb);
+ }
+
+cb_err:
+ rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+ return ret;
+}
+
+struct rte_cryptodev_cb *
+rte_cryptodev_add_deq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ rte_cryptodev_callback_fn cb_fn,
+ void *cb_arg)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_cb_rcu *list;
+ struct rte_cryptodev_cb *cb, *tail;
+
+ if (!cb_fn) {
+ CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ if (qp_id >= dev->data->nb_queue_pairs) {
+ CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+
+ cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+ if (cb == NULL) {
+ CDEV_LOG_ERR("Failed to allocate memory for callback on "
+ "dev=%d, queue_pair_id=%d", dev_id, qp_id);
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+	rte_spinlock_lock(&rte_cryptodev_callback_lock);
+
+	/* Callback lists are allocated in rte_cryptodev_configure(). */
+	if (dev->deq_cbs == NULL) {
+		CDEV_LOG_ERR("Callback not initialized");
+		rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+		rte_free(cb);
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	cb->fn = cb_fn;
+	cb->arg = cb_arg;
+
+ /* Add the callbacks in fifo order. */
+ list = &dev->deq_cbs[qp_id];
+ tail = list->next;
+
+ if (tail) {
+ while (tail->next)
+ tail = tail->next;
+		/* Stores to cb->fn and cb->arg should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+	} else {
+		/* Stores to cb->fn and cb->arg should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
+	}
+
+ rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+
+ return cb;
+}
+
+int
+rte_cryptodev_remove_deq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ struct rte_cryptodev_cb *cb)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_cb **prev_cb, *curr_cb;
+ struct rte_cryptodev_cb_rcu *list;
+ int ret;
+
+ ret = -EINVAL;
+
+ if (!cb) {
+ CDEV_LOG_ERR("Callback is NULL");
+ return -EINVAL;
+ }
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ if (qp_id >= dev->data->nb_queue_pairs) {
+ CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
+ return -ENODEV;
+ }
+
+ rte_spinlock_lock(&rte_cryptodev_callback_lock);
+	if (dev->deq_cbs == NULL) {
+ CDEV_LOG_ERR("Callback not initialized");
+ goto cb_err;
+ }
+
+ list = &dev->deq_cbs[qp_id];
+ if (list == NULL) {
+ CDEV_LOG_ERR("Callback list is NULL");
+ goto cb_err;
+ }
+
+ if (list->qsbr == NULL) {
+		CDEV_LOG_ERR("RCU QSBR is NULL");
+ goto cb_err;
+ }
+
+ prev_cb = &list->next;
+ for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
+ curr_cb = *prev_cb;
+ if (curr_cb == cb) {
+ /* Remove the user cb from the callback list. */
+ __atomic_store_n(prev_cb, curr_cb->next,
+ __ATOMIC_RELAXED);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (!ret) {
+ /* Call sync with invalid thread id as this is part of
+ * control plane API
+ */
+ rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
+ rte_free(cb);
+ }
+
+cb_err:
+ rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+ return ret;
+}
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
#include "rte_dev.h"
#include <rte_common.h>
#include <rte_config.h>
+#include <rte_rcu_qsbr.h>
#include "rte_cryptodev_trace_fp.h"
/**< The mempool for creating sess private data in sessionless mode */
};
+/**
+ * Function type used for processing crypto ops when the enqueue/dequeue burst
+ * APIs are called.
+ *
+ * The callback function is invoked on each burst: before the ops are passed
+ * to the device on enqueue, and after they are received from the device on
+ * dequeue.
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair on which ops are
+ * enqueued/dequeued. The value must be in the
+ * range [0, nb_queue_pairs - 1] previously
+ * supplied to *rte_cryptodev_configure*.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
+ * @param user_param The arbitrary user parameter passed in by the
+ * application when the callback was originally
+ * registered.
+ * @return		The number of ops remaining in the burst. This value
+ *			is passed to the next callback and finally to the
+ *			crypto device (on enqueue) or to the application
+ *			(on dequeue).
+ */
+typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
+
/**
* Typedef for application callback function to be registered by application
* software for notification of device events
enum rte_cryptodev_event_type event,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-
typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
/** Structure to keep track of registered callbacks */
TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
+/**
+ * Structure used to hold information about the callbacks to be called for a
+ * queue pair on enqueue/dequeue.
+ */
+struct rte_cryptodev_cb {
+ struct rte_cryptodev_cb *next;
+ /**< Pointer to next callback */
+ rte_cryptodev_callback_fn fn;
+ /**< Pointer to callback function */
+ void *arg;
+ /**< Pointer to argument */
+};
+
+/**
+ * @internal
+ * Structure used to hold information about the RCU for a queue pair.
+ */
+struct rte_cryptodev_cb_rcu {
+ struct rte_cryptodev_cb *next;
+ /**< Pointer to next callback */
+ struct rte_rcu_qsbr *qsbr;
+ /**< RCU QSBR variable per queue pair */
+};
+
/** The data structure associated with each crypto device. */
struct rte_cryptodev {
dequeue_pkt_burst_t dequeue_burst;
__extension__
uint8_t attached : 1;
/**< Flag indicating the device is attached */
+
+ struct rte_cryptodev_cb_rcu *enq_cbs;
+	/**< User application callbacks for pre-enqueue processing */
+
+	struct rte_cryptodev_cb_rcu *deq_cbs;
+	/**< User application callbacks for post-dequeue processing */
} __rte_cache_aligned;
void *
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+ rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
nb_ops = (*dev->dequeue_burst)
(dev->data->queue_pairs[qp_id], ops, nb_ops);
-
- rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
+#ifdef RTE_CRYPTO_CALLBACKS
+ if (unlikely(dev->deq_cbs != NULL)) {
+ struct rte_cryptodev_cb_rcu *list;
+ struct rte_cryptodev_cb *cb;
+
+ /* __ATOMIC_RELEASE memory order was used when the
+		 * callback was inserted into the list.
+ * Since there is a clear dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ list = &dev->deq_cbs[qp_id];
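+		/* QSBR was initialized with one reader thread, hence id 0 */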
+ rte_rcu_qsbr_thread_online(list->qsbr, 0);
+ cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+ while (cb != NULL) {
+ nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+ cb->arg);
+ cb = cb->next;
+		}
+
+ rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+ }
+#endif
return nb_ops;
}
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+#ifdef RTE_CRYPTO_CALLBACKS
+ if (unlikely(dev->enq_cbs != NULL)) {
+ struct rte_cryptodev_cb_rcu *list;
+ struct rte_cryptodev_cb *cb;
+
+ /* __ATOMIC_RELEASE memory order was used when the
+		 * callback was inserted into the list.
+ * Since there is a clear dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ list = &dev->enq_cbs[qp_id];
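+		/* QSBR was initialized with one reader thread, hence id 0 */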
+ rte_rcu_qsbr_thread_online(list->qsbr, 0);
+ cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+ while (cb != NULL) {
+ nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+ cb->arg);
+ cb = cb->next;
+		}
+
+ rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+ }
+#endif
+
rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
return (*dev->enqueue_burst)(
dev->data->queue_pairs[qp_id], ops, nb_ops);
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
uint32_t n);
+/**
+ * Add a user callback for a given crypto device and queue pair which will be
+ * called on crypto ops enqueue.
+ *
+ * This API configures a function to be called for each burst of crypto ops
+ * enqueued on a given crypto device queue pair. The return value is a
+ * pointer that can be used later to remove the callback using
+ * rte_cryptodev_remove_enq_callback().
+ *
+ * Callbacks registered by the application do not survive
+ * rte_cryptodev_configure(), as it reinitializes the callback list.
+ * It is the user's responsibility to remove all installed callbacks before
+ * calling rte_cryptodev_configure() to avoid possible memory leaks.
+ * The application is expected to call this API after rte_cryptodev_configure().
+ *
+ * Multiple functions can be registered per queue pair and they are called
+ * in the order they were added. The API does not limit the number of
+ * callbacks.
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair on which ops are
+ * to be enqueued for processing. The value
+ * must be in the range [0, nb_queue_pairs - 1]
+ * previously supplied to
+ * *rte_cryptodev_configure*.
+ * @param cb_fn The callback function
+ * @param cb_arg A generic pointer parameter which will be passed
+ * to each invocation of the callback function on
+ * this crypto device and queue pair.
+ *
+ * @return
+ *   - NULL on error; rte_errno will contain the error code.
+ * - On success, a pointer value which can later be used to remove the
+ * callback.
+ */
+__rte_experimental
+struct rte_cryptodev_cb *
+rte_cryptodev_add_enq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ rte_cryptodev_callback_fn cb_fn,
+ void *cb_arg);
+
+/**
+ * Remove a user callback function for a given crypto device and queue pair.
+ *
+ * This function is used to remove enqueue callbacks that were added to a
+ * crypto device queue pair using rte_cryptodev_add_enq_callback().
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair on which ops are
+ * to be enqueued. The value must be in the
+ * range [0, nb_queue_pairs - 1] previously
+ * supplied to *rte_cryptodev_configure*.
+ * @param cb Pointer to user supplied callback created via
+ * rte_cryptodev_add_enq_callback().
+ *
+ * @return
+ * - 0: Success. Callback was removed.
+ * - <0: The dev_id or the qp_id is out of range, or the callback
+ * is NULL or not found for the crypto device queue pair.
+ */
+__rte_experimental
+int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ struct rte_cryptodev_cb *cb);
+
+/**
+ * Add a user callback for a given crypto device and queue pair which will be
+ * called on crypto ops dequeue.
+ *
+ * This API configures a function to be called for each burst of crypto ops
+ * dequeued from a given crypto device queue pair. The return value is a
+ * pointer that can be used later to remove the callback using
+ * rte_cryptodev_remove_deq_callback().
+ *
+ * Callbacks registered by the application do not survive
+ * rte_cryptodev_configure(), as it reinitializes the callback list.
+ * It is the user's responsibility to remove all installed callbacks before
+ * calling rte_cryptodev_configure() to avoid possible memory leaks.
+ * The application is expected to call this API after rte_cryptodev_configure().
+ *
+ * Multiple functions can be registered per queue pair and they are called
+ * in the order they were added. The API does not limit the number of
+ * callbacks.
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair on which ops are
+ * to be dequeued. The value must be in the
+ * range [0, nb_queue_pairs - 1] previously
+ * supplied to *rte_cryptodev_configure*.
+ * @param cb_fn The callback function
+ * @param cb_arg A generic pointer parameter which will be passed
+ * to each invocation of the callback function on
+ * this crypto device and queue pair.
+ *
+ * @return
+ *   - NULL on error; rte_errno will contain the error code.
+ * - On success, a pointer value which can later be used to remove the
+ * callback.
+ */
+__rte_experimental
+struct rte_cryptodev_cb *
+rte_cryptodev_add_deq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ rte_cryptodev_callback_fn cb_fn,
+ void *cb_arg);
+
+/**
+ * Remove a user callback function for a given crypto device and queue pair.
+ *
+ * This function is used to remove dequeue callbacks that were added to a
+ * crypto device queue pair using rte_cryptodev_add_deq_callback().
+ *
+ * @param dev_id The identifier of the device.
+ * @param qp_id The index of the queue pair on which ops are
+ * to be dequeued. The value must be in the
+ * range [0, nb_queue_pairs - 1] previously
+ * supplied to *rte_cryptodev_configure*.
+ * @param cb Pointer to user supplied callback created via
+ * rte_cryptodev_add_deq_callback().
+ *
+ * @return
+ * - 0: Success. Callback was removed.
+ * - <0: The dev_id or the qp_id is out of range, or the callback
+ * is NULL or not found for the crypto device queue pair.
+ */
+__rte_experimental
+int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
+ uint16_t qp_id,
+ struct rte_cryptodev_cb *cb);
+
#ifdef __cplusplus
}
#endif
rte_cryptodev_raw_enqueue;
rte_cryptodev_raw_enqueue_burst;
rte_cryptodev_raw_enqueue_done;
+
+ # added in 21.02
+ rte_cryptodev_add_deq_callback;
+ rte_cryptodev_add_enq_callback;
+ rte_cryptodev_remove_deq_callback;
+ rte_cryptodev_remove_enq_callback;
+
};