Commit ecaed092b677 ("ring: return remaining entry count when dequeuing") changed the return value of rte_ring_sc_dequeue_bulk.
This patch updates the scheduler to comply with this change.
Fixes: 8a48e039432b ("crypto/scheduler: optimize crypto op ordering")
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
struct rte_crypto_op *op;
uint32_t nb_objs = rte_ring_count(order_ring);
uint32_t nb_ops_to_deq = 0;
struct rte_crypto_op *op;
uint32_t nb_objs = rte_ring_count(order_ring);
uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
if (nb_objs > nb_ops)
nb_objs = nb_ops;
if (nb_objs > nb_ops)
nb_objs = nb_ops;
- status = rte_ring_sc_dequeue_bulk(order_ring, (void **)ops,
- nb_ops_to_deq, NULL);
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
- return (status == 0) ? nb_ops_to_deq : 0;
}
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
}
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;