cryptodev: use new flat array in fast path API
author Akhil Goyal <gakhil@marvell.com>
Wed, 20 Oct 2021 11:27:53 +0000 (16:57 +0530)
committer Akhil Goyal <gakhil@marvell.com>
Wed, 20 Oct 2021 13:33:16 +0000 (15:33 +0200)
Rework fast-path cryptodev functions to use rte_crypto_fp_ops[].
While this is an API/ABI breakage, the change is intended to be
transparent for both users (no changes to user applications are
required) and PMD developers (no changes to PMDs are required).

Signed-off-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
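
For context, a minimal sketch of the flat-array layout that the reworked
fast path indexes. The authoritative definition lives in
rte_cryptodev_core.h; the typedef names, field order and any
reserved/padding members below are assumptions, and only the members
referenced in the diff are shown:

    #include <rte_crypto.h>

    struct rte_cryptodev_cb_rcu;

    typedef uint16_t (*crypto_dequeue_burst_t)(void *qp,
                    struct rte_crypto_op **ops, uint16_t nb_ops);
    typedef uint16_t (*crypto_enqueue_burst_t)(void *qp,
                    struct rte_crypto_op **ops, uint16_t nb_ops);

    struct rte_crypto_fp_ops {
            /* PMD dequeue/enqueue burst handlers. */
            crypto_dequeue_burst_t dequeue_burst;
            crypto_enqueue_burst_t enqueue_burst;
            struct {
                    /* Per-device array of queue pair pointers,
                     * indexed as qp.data[qp_id]. */
                    void **data;
                    /* Per-queue-pair enqueue/dequeue callback lists. */
                    struct rte_cryptodev_cb_rcu *enq_cb;
                    struct rte_cryptodev_cb_rcu *deq_cb;
            } qp;
    };

    /* One slot per device ID; the hot path indexes it directly. */
    extern struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];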
lib/cryptodev/rte_cryptodev.h

index ce0dca7..56e3868 100644
@@ -1832,13 +1832,18 @@ static inline uint16_t
 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
                struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-       struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+       const struct rte_crypto_fp_ops *fp_ops;
+       void *qp;
 
        rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
-       nb_ops = (*dev->dequeue_burst)
-                       (dev->data->queue_pairs[qp_id], ops, nb_ops);
+
+       fp_ops = &rte_crypto_fp_ops[dev_id];
+       qp = fp_ops->qp.data[qp_id];
+
+       nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
+
 #ifdef RTE_CRYPTO_CALLBACKS
-       if (unlikely(dev->deq_cbs != NULL)) {
+       if (unlikely(fp_ops->qp.deq_cb != NULL)) {
                struct rte_cryptodev_cb_rcu *list;
                struct rte_cryptodev_cb *cb;
 
@@ -1848,7 +1853,7 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
                 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
                 * not required.
                 */
-               list = &dev->deq_cbs[qp_id];
+               list = &fp_ops->qp.deq_cb[qp_id];
                rte_rcu_qsbr_thread_online(list->qsbr, 0);
                cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
 
@@ -1899,10 +1904,13 @@ static inline uint16_t
 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
                struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-       struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+       const struct rte_crypto_fp_ops *fp_ops;
+       void *qp;
 
+       fp_ops = &rte_crypto_fp_ops[dev_id];
+       qp = fp_ops->qp.data[qp_id];
 #ifdef RTE_CRYPTO_CALLBACKS
-       if (unlikely(dev->enq_cbs != NULL)) {
+       if (unlikely(fp_ops->qp.enq_cb != NULL)) {
                struct rte_cryptodev_cb_rcu *list;
                struct rte_cryptodev_cb *cb;
 
@@ -1912,7 +1920,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
                 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
                 * not required.
                 */
-               list = &dev->enq_cbs[qp_id];
+               list = &fp_ops->qp.enq_cb[qp_id];
                rte_rcu_qsbr_thread_online(list->qsbr, 0);
                cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
 
@@ -1927,8 +1935,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
 #endif
 
        rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
-       return (*dev->enqueue_burst)(
-                       dev->data->queue_pairs[qp_id], ops, nb_ops);
+       return fp_ops->enqueue_burst(qp, ops, nb_ops);
 }
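
Since the indirection change is internal to the inline helpers,
application fast-path code keeps the same shape after this patch; a
minimal usage sketch (dev_id, qp_id and ops[] are placeholders, device
and queue pair setup omitted):

    #include <rte_cryptodev.h>

    /* Enqueue a burst and poll completions on one queue pair. The
     * dev_id/qp_id lookup now resolves through rte_crypto_fp_ops[]
     * inside the inline helpers, with no change visible here. */
    static void
    poll_qp(uint8_t dev_id, uint16_t qp_id,
                    struct rte_crypto_op **ops, uint16_t nb_ops)
    {
            uint16_t enq, deq;

            enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
            deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, enq);
            (void)deq; /* completed ops would be processed here */
    }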