From 8b6c9aee90a90705c40e0593012a2c2a4bec7d52 Mon Sep 17 00:00:00 2001
From: Konstantin Ananyev
Date: Fri, 22 May 2020 12:57:36 +0100
Subject: [PATCH] crypto/scheduler: use ring peek API

The scheduler PMD uses its own hand-made peek functions that
directly access rte_ring internals. Now that rte_ring provides
an API for that type of functionality, change the scheduler PMD
to use the API provided by rte_ring.

Signed-off-by: Konstantin Ananyev
Acked-by: Fan Zhang
---
 .../crypto/scheduler/scheduler_multicore.c   | 28 ++++++++-----------
 .../crypto/scheduler/scheduler_pmd_private.h | 28 +++++++------------
 2 files changed, 21 insertions(+), 35 deletions(-)

diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 7808e9a345..2d6790bb3b 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -110,31 +110,25 @@ static uint16_t
 schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+	struct rte_ring *order_ring =
+		((struct scheduler_qp_ctx *)qp)->order_ring;
 	struct rte_crypto_op *op;
-	uint32_t nb_objs = rte_ring_count(order_ring);
-	uint32_t nb_ops_to_deq = 0;
-	uint32_t nb_ops_deqd = 0;
-
-	if (nb_objs > nb_ops)
-		nb_objs = nb_ops;
+	uint32_t nb_objs, nb_ops_to_deq;
 
-	while (nb_ops_to_deq < nb_objs) {
-		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+		nb_ops, NULL);
+	if (nb_objs == 0)
+		return 0;
+
+	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+		op = ops[nb_ops_to_deq];
 		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
 			break;
-
 		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
-		nb_ops_to_deq++;
-	}
-
-	if (nb_ops_to_deq) {
-		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-				(void **)ops, nb_ops_to_deq, NULL);
 	}
 
-	return nb_ops_deqd;
+	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+	return nb_ops_to_deq;
 }
 
 static int
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 3ed480c183..e1531d1da4 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -80,36 +80,28 @@ scheduler_order_insert(struct rte_ring *order_ring,
 	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
 }
 
-#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
-	struct rte_crypto_op **ring = (void *)&order_ring[1]; \
-	op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
-} while (0)
-
 static __rte_always_inline uint16_t
 scheduler_order_drain(struct rte_ring *order_ring,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rte_crypto_op *op;
-	uint32_t nb_objs = rte_ring_count(order_ring);
-	uint32_t nb_ops_to_deq = 0;
-	uint32_t nb_ops_deqd = 0;
+	uint32_t nb_objs, nb_ops_to_deq;
 
-	if (nb_objs > nb_ops)
-		nb_objs = nb_ops;
+	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+		nb_ops, NULL);
+	if (nb_objs == 0)
+		return 0;
 
-	while (nb_ops_to_deq < nb_objs) {
-		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+		op = ops[nb_ops_to_deq];
 		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
 			break;
-		nb_ops_to_deq++;
 	}
 
-	if (nb_ops_to_deq)
-		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-			(void **)ops, nb_ops_to_deq, NULL);
-
-	return nb_ops_deqd;
+	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+	return nb_ops_to_deq;
 }
+
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
-- 
2.20.1
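
For reference, both hunks apply the same two-step ring peek pattern:
rte_ring_dequeue_burst_start() reserves a burst of objects and makes
them visible to the caller without removing them from the ring, and
rte_ring_dequeue_finish() then commits only as many objects as the
caller actually consumed. Below is a minimal sketch of that pattern,
assuming DPDK 20.05 headers with ALLOW_EXPERIMENTAL_API defined (the
peek API was experimental at the time); drain_ready() and its
is_ready() predicate are hypothetical names used only for
illustration, not part of the patch.

/*
 * Sketch of the ring peek pattern used by the patch above: reserve a
 * burst, inspect it, then commit only the leading "ready" run.
 * Requires a ring operating in single-consumer (or HTS) mode; the
 * peek API does not support multi-consumer rings.
 */
#include <stdint.h>
#include <rte_ring.h>	/* pulls in rte_ring_peek.h when
			 * ALLOW_EXPERIMENTAL_API is defined */

/* Application-defined readiness test for one object (hypothetical). */
typedef int (*ready_fn)(void *obj);

static uint32_t
drain_ready(struct rte_ring *r, void **objs, uint32_t nb, ready_fn is_ready)
{
	uint32_t n, i;

	/* Reserve up to nb objects; the ring is not modified yet. */
	n = rte_ring_dequeue_burst_start(r, objs, nb, NULL);
	if (n == 0)
		return 0;

	/* Scan the reserved objects, stopping at the first not-ready one. */
	for (i = 0; i != n; i++)
		if (!is_ready(objs[i]))
			break;

	/*
	 * Commit only the first i objects; the remaining n - i stay on
	 * the ring for a later call. This replaces the old pattern of
	 * peeking via ring internals (SCHEDULER_GET_RING_OBJ) followed
	 * by rte_ring_sc_dequeue_bulk().
	 */
	rte_ring_dequeue_finish(r, i);
	return i;
}

The pattern fits the scheduler PMD because each queue pair's order
ring is used in single-producer/single-consumer fashion (note the
rte_ring_sp_enqueue_burst() call in scheduler_order_insert()), which
satisfies the peek API's single-consumer requirement.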