* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermark support in the library.
+ * changed the return value of the enqueue and dequeue bulk functions to
+   match that of the burst equivalents. In all cases, ring functions that
+   operate on multiple packets now return the number of elements enqueued
+   or dequeued, as appropriate (see the usage sketch after the list below).
+   The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``
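
A minimal usage sketch of the new convention (the helper name ``tx_to_ring`` and its variables are illustrative, not part of the patch; the three-argument prototypes declared in this patch are assumed): with the fixed-size bulk behaviour a call now returns either ``n`` or 0, so callers check for a zero count instead of a negative errno::

    #include <rte_mbuf.h>
    #include <rte_ring.h>

    /* Enqueue a burst of mbufs; free them all if the ring has no room.
     * With the updated API the bulk call returns the number of objects
     * enqueued, which for the fixed-size variant is either n or 0. */
    static unsigned int
    tx_to_ring(struct rte_ring *r, struct rte_mbuf **bufs, unsigned int n)
    {
            unsigned int sent = rte_ring_enqueue_bulk(r, (void **)bufs, n);

            if (sent == 0) {
                    unsigned int i;

                    for (i = 0; i < n; i++)
                            rte_pktmbuf_free(bufs[i]);
            }
            return sent;
    }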
ABI Changes
-----------
cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
(void **) lp->rx.mbuf_out[worker].array,
bsz);
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
n_mbufs += bsz_rd;
(void **) lp->mbuf_in.array,
bsz_rd);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
}
#endif
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
printf("[Press Ctrl-C to quit ...]\n");
for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;
- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);
if (unlikely(rx_pkts == 0)){
if (need_flush)
cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
}
if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
conf->counter = 0; /* reset empty read loop counter */
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt) == 0)
+ ; /* empty body */
conf_idx++;
if (confs[conf_idx] == NULL)
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) != 0))
+ rx_pkts) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);
cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}
static unsigned
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to a ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
rte_pause();
r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}
/**
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to a ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
rte_smp_wmb();
r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}
/**
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from a ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
/* Set the actual entries for dequeue */
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}
/**
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from a ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
rte_smp_rmb();
r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}
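
For reference, a small sketch of the difference the comments above describe (names are illustrative only; the three-argument prototypes from this patch are assumed): the fixed-size bulk dequeue is all-or-nothing, while the variable-size burst dequeue returns whatever is available:

    #include <rte_ring.h>

    #define BURST 32

    /* Illustrative contrast between the two behaviours:
     * bulk (RTE_RING_QUEUE_FIXED) returns BURST or 0,
     * burst (RTE_RING_QUEUE_VARIABLE) returns anything from 0 to BURST. */
    static void
    dequeue_contrast(struct rte_ring *r)
    {
            void *objs[BURST];
            unsigned int n;

            n = rte_ring_dequeue_bulk(r, objs, BURST);  /* n == 0 or n == BURST */
            n = rte_ring_dequeue_burst(r, objs, BURST); /* 0 <= n <= BURST */
            (void)n;
    }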
/**
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}
/**
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}
/**
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}
/**
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
if (r->cons.single)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOENT;
}
/**
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOENT;
}
/**
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOENT;
}
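
The single-object wrappers above keep their errno-style contract (0 on success, -ENOBUFS when the ring is full, -ENOENT when it is empty), so callers of rte_ring_enqueue() and rte_ring_dequeue() need no changes. A minimal sketch, assuming the ring carries mbufs (the helper name move_one is hypothetical):

    #include <rte_mbuf.h>
    #include <rte_ring.h>

    /* Hypothetical helper: move one mbuf between rings using the unchanged
     * single-object API. Returns 0 on success, -ENOENT if "src" is empty,
     * or -ENOBUFS if "dst" is full (the mbuf is freed in that case). */
    static int
    move_one(struct rte_ring *src, struct rte_ring *dst)
    {
            void *obj;
            int ret;

            ret = rte_ring_dequeue(src, &obj);
            if (ret < 0)
                    return ret;            /* -ENOENT */

            ret = rte_ring_enqueue(dst, obj);
            if (ret < 0)
                    rte_pktmbuf_free(obj); /* -ENOBUFS: drop the mbuf */
            return ret;
    }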
/**
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
(void **) worker_mbuf->array,
app.burst_size_worker_read);
- if (ret == -ENOENT)
+ if (ret == 0)
continue;
do {
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
app.burst_size_worker_write);
- } while (ret < 0);
+ } while (ret == 0);
}
}
(void **) &app.mbuf_tx[i].array[n_mbufs],
app.burst_size_tx_read);
- if (ret == -ENOENT)
+ if (ret == 0)
continue;
n_mbufs += app.burst_size_tx_read;
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);
/* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
TEST_RING_VERIFY(0 == rte_ring_empty(r));
/* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
/* check data */
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;
printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
/* check data */
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
}
const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();
const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();