crypto/scheduler: use ring peek API
author    Konstantin Ananyev <konstantin.ananyev@intel.com>
Fri, 22 May 2020 11:57:36 +0000 (12:57 +0100)
committer Akhil Goyal <akhil.goyal@nxp.com>
Sat, 18 Jul 2020 21:09:00 +0000 (23:09 +0200)
The scheduler PMD uses its own hand-made peek functions that
directly access rte_ring internals. Now that rte_ring provides
an API for this type of functionality, change the scheduler PMD
to use it.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
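
For reference, the ring peek API splits a dequeue into two stages:
rte_ring_dequeue_burst_start() copies up to nb_ops objects into the
caller's array without advancing the consumer head, and
rte_ring_dequeue_finish() then commits however many of those objects
the caller actually consumed; the rest remain on the ring. A minimal
sketch of the pattern, assuming a single-consumer ring of struct
rte_crypto_op pointers (drain_ready() is a hypothetical name used for
illustration only, not part of the patch):

	#include <rte_crypto.h>
	#include <rte_ring.h> /* pulls in the peek API, experimental at the time */

	static uint16_t
	drain_ready(struct rte_ring *r, struct rte_crypto_op **ops,
			uint16_t nb_ops)
	{
		uint32_t n, i;

		/* Stage 1: peek - reserve up to nb_ops objects and copy
		 * them into ops[]; the consumer head is not moved yet.
		 */
		n = rte_ring_dequeue_burst_start(r, (void **)ops, nb_ops,
				NULL);
		if (n == 0)
			return 0;

		/* Inspect the peeked objects; stop at the first one that
		 * is not yet processed.
		 */
		for (i = 0; i != n; i++)
			if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				break;

		/* Stage 2: commit - consume only the leading i objects;
		 * the remaining n - i stay on the ring for the next call.
		 */
		rte_ring_dequeue_finish(r, i);
		return i;
	}

Unlike the old code below, this neither reads ring internals
(cons.head, mask) directly nor copies the same objects a second time
via rte_ring_sc_dequeue_bulk().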
drivers/crypto/scheduler/scheduler_multicore.c
drivers/crypto/scheduler/scheduler_pmd_private.h

diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 7808e9a..2d6790b 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -110,31 +110,25 @@ static uint16_t
 schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
 {
-       struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+       struct rte_ring *order_ring =
+               ((struct scheduler_qp_ctx *)qp)->order_ring;
        struct rte_crypto_op *op;
-       uint32_t nb_objs = rte_ring_count(order_ring);
-       uint32_t nb_ops_to_deq = 0;
-       uint32_t nb_ops_deqd = 0;
-
-       if (nb_objs > nb_ops)
-               nb_objs = nb_ops;
+       uint32_t nb_objs, nb_ops_to_deq;
 
-       while (nb_ops_to_deq < nb_objs) {
-               SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+       nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+               nb_ops, NULL);
+       if (nb_objs == 0)
+               return 0;
 
+       for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+               op = ops[nb_ops_to_deq];
                if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
                        break;
-
                op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
-               nb_ops_to_deq++;
-       }
-
-       if (nb_ops_to_deq) {
-               nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-                               (void **)ops, nb_ops_to_deq, NULL);
        }
 
-       return nb_ops_deqd;
+       rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+       return nb_ops_to_deq;
 }
 
 static int
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 3ed480c..e1531d1 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -80,36 +80,28 @@ scheduler_order_insert(struct rte_ring *order_ring,
        rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
 }
 
-#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do {            \
-       struct rte_crypto_op **ring = (void *)&order_ring[1];     \
-       op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
-} while (0)
-
 static __rte_always_inline uint16_t
 scheduler_order_drain(struct rte_ring *order_ring,
                struct rte_crypto_op **ops, uint16_t nb_ops)
 {
        struct rte_crypto_op *op;
-       uint32_t nb_objs = rte_ring_count(order_ring);
-       uint32_t nb_ops_to_deq = 0;
-       uint32_t nb_ops_deqd = 0;
+       uint32_t nb_objs, nb_ops_to_deq;
 
-       if (nb_objs > nb_ops)
-               nb_objs = nb_ops;
+       nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+               nb_ops, NULL);
+       if (nb_objs == 0)
+               return 0;
 
-       while (nb_ops_to_deq < nb_objs) {
-               SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+       for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+               op = ops[nb_ops_to_deq];
                if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
                        break;
-               nb_ops_to_deq++;
        }
 
-       if (nb_ops_to_deq)
-               nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-                               (void **)ops, nb_ops_to_deq, NULL);
-
-       return nb_ops_deqd;
+       rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+       return nb_ops_to_deq;
 }
+
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
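
Note that the peek API requires a ring whose dequeue side uses
single-consumer (or HTS) sync mode, and no other dequeue may be
issued on the ring between the _start and _finish calls; the
scheduler's ordering ring already satisfies this, as the old code's
use of rte_ring_sc_dequeue_bulk() shows. A hypothetical standalone
example of creating a compatible ring (assumes <rte_ring.h> and
<rte_lcore.h>):

	struct rte_ring *r = rte_ring_create("order_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);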