schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
struct rte_crypto_op *op;
- uint32_t nb_objs = rte_ring_count(order_ring);
- uint32_t nb_ops_to_deq = 0;
- uint32_t nb_ops_deqd = 0;
+ uint32_t nb_objs, nb_ops_to_deq;
-
- if (nb_objs > nb_ops)
- nb_objs = nb_ops;
- while (nb_ops_to_deq < nb_objs) {
- SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+ nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+ nb_ops, NULL);
+ if (nb_objs == 0)
+ return 0;
+ for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+ op = ops[nb_ops_to_deq];
if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
break;
-
op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
- nb_ops_to_deq++;
- }
-
- if (nb_ops_to_deq) {
- nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
- (void **)ops, nb_ops_to_deq, NULL);
}
- return nb_ops_deqd;
+ rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+ return nb_ops_to_deq;
}
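
The hunk above replaces the open-coded peek (rte_ring_count() plus the
SCHEDULER_GET_RING_OBJ macro reaching into the ring's internals, followed by
rte_ring_sc_dequeue_bulk()) with the rte_ring "peek" API declared in
rte_ring_peek.h. Below is a minimal sketch of that start/finish pattern,
assuming a ring whose dequeue side uses single-consumer (or MT_HTS) sync
mode, which the peek API requires; the names drain_ready, my_obj and ready
are illustrative and not part of the patch, and depending on the DPDK
version building against the peek API may also need ALLOW_EXPERIMENTAL_API.

#include <rte_ring.h>
#include <rte_ring_peek.h>

struct my_obj {
    int ready;    /* hypothetical completion flag */
};

static unsigned int
drain_ready(struct rte_ring *r, void **objs, unsigned int nb)
{
    unsigned int n, i;

    /* Copy up to nb object pointers out of the ring; the dequeue is not
     * committed until rte_ring_dequeue_finish() is called. */
    n = rte_ring_dequeue_burst_start(r, objs, nb, NULL);
    if (n == 0)
        return 0;

    /* Scan the peeked objects and stop at the first one not yet done. */
    for (i = 0; i != n; i++)
        if (!((struct my_obj *)objs[i])->ready)
            break;

    /* Commit only the leading objects that were ready; everything from the
     * first not-ready one onwards stays on the ring for a later call. */
    rte_ring_dequeue_finish(r, i);
    return i;
}
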
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}
-#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
- struct rte_crypto_op **ring = (void *)&order_ring[1]; \
- op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
-} while (0)
-
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_crypto_op *op;
- uint32_t nb_objs = rte_ring_count(order_ring);
- uint32_t nb_ops_to_deq = 0;
- uint32_t nb_ops_deqd = 0;
+ uint32_t nb_objs, nb_ops_to_deq;
- if (nb_objs > nb_ops)
- nb_objs = nb_ops;
+ nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+ nb_ops, NULL);
+ if (nb_objs == 0)
+ return 0;
- while (nb_ops_to_deq < nb_objs) {
- SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+ for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+ op = ops[nb_ops_to_deq];
if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
break;
- nb_ops_to_deq++;
}
- if (nb_ops_to_deq)
- nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
- (void **)ops, nb_ops_to_deq, NULL);
-
- return nb_ops_deqd;
+ rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+ return nb_ops_to_deq;
}
+
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
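
One practical note on these helpers: the rte_ring peek API used in
scheduler_order_drain() only supports single-consumer (RTE_RING_SYNC_ST) or
MT_HTS consumer sync modes, so the order ring drained here needs to be
created with a compatible mode. A sketch of creating such a ring follows;
the name, size and socket choice are illustrative, not the driver's actual
parameters.

#include <rte_lcore.h>
#include <rte_ring.h>

/* Single-producer / single-consumer ring: a sync mode the peek API accepts.
 * The ring size must be a power of two. */
struct rte_ring *order_ring = rte_ring_create("order_ring_example", 1024,
    rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);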