/* first dequeue packets from ring of primary process */
const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
- (void *)rxtx_bufs, BURST_SIZE);
+ (void *)rxtx_bufs, BURST_SIZE, NULL);
stats->dequeue_pkts += nb_in_deq;
if (nb_in_deq) {
while (1) {
/* Process as many elements as can be dequeued. */
- count = rte_ring_dequeue_burst(ring, obj_table, MAX_BULK);
+ count = rte_ring_dequeue_burst(ring, obj_table, MAX_BULK, NULL);
if (unlikely(count == 0))
continue;
* added an extra parameter to the burst/bulk enqueue functions to
return the number of free spaces in the ring after enqueue. This can
be used by an application to implement its own watermark functionality.
+ * added an extra parameter to the burst/bulk dequeue functions to return
+ the number of elements remaining in the ring after dequeue (a usage
+ sketch for both new out-parameters follows the note below).
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
or dequeued, as appropriate. The updated functions include:
- ``rte_ring_sc_dequeue_bulk``
- ``rte_ring_dequeue_bulk``
+ NOTE: the above functions all have different parameters as well as
+ different return values, due to the other listed changes above. This
+ means that all instances of the functions in existing code will be
+ flagged by the compiler. The return value usage should be checked
+ while fixing the compiler error due to the extra parameter.
+
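The following is a minimal sketch of how an application might consume the
two new out-parameters. It is illustrative only: ``WATERMARK``, ``BURST``,
``throttle_producer()`` and ``process()`` are hypothetical application
names, not part of the rte_ring API::

    #include <rte_ring.h>

    #define WATERMARK 64   /* hypothetical application threshold */
    #define BURST 32

    /* Hypothetical application hooks, not part of the rte_ring API. */
    static void throttle_producer(void) { /* e.g. back off briefly */ }
    static void process(void *obj) { (void)obj; }

    static void
    producer_poll(struct rte_ring *r, void **objs, unsigned int n)
    {
        unsigned int free_space;

        /* free_space reports the room left in the ring after enqueue */
        n = rte_ring_enqueue_burst(r, objs, n, &free_space); /* n: enqueued */
        if (free_space < WATERMARK)
            throttle_producer();   /* application-level watermark */
    }

    static void
    consumer_poll(struct rte_ring *r)
    {
        void *objs[BURST];
        unsigned int i, n, remaining;

        /* remaining reports the entries still queued after dequeue */
        n = rte_ring_dequeue_burst(r, objs, BURST, &remaining);
        for (i = 0; i < n; i++)
            process(objs[i]);
        /* remaining == 0 means this call drained the ring */
    }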
ABI Changes
-----------
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned int nb_dequeued = 0;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned int nb_dequeued = 0;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
struct port *port = &mode_8023ad_ports[slaves[i]];
slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];
for (j = 0; j < slave_slow_nb_pkts[i]; j++)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
printf("\nCore %u acting as distributor core.\n", rte_lcore_id());
while (!quit_signal_dist) {
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE*1);
+ (void *)bufs, BURST_SIZE*1, NULL);
if (nb_rx) {
app_stats.dist.in_pkts += nb_rx;
struct rte_mbuf *bufs[BURST_SIZE_TX];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE_TX);
+ (void *)bufs, BURST_SIZE_TX, NULL);
app_stats.tx.dequeue_pkts += nb_rx;
/* if we get no traffic, flush anything we have */
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);
if (unlikely(ret == 0))
continue;
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);
if (unlikely(ret == 0))
continue;
uint16_t i, rx_pkts;
uint8_t port;
- rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);
if (unlikely(rx_pkts == 0)){
if (need_flush)
/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;
/* dequeue the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);
if (unlikely(nb_dq_mbufs == 0))
continue;
/* dequeue the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);
if (unlikely(dqnum == 0))
continue;
*/
SET_CPU_BUSY(tx_conf, CPU_POLL);
nb_rx = rte_ring_sc_dequeue_burst(ring, (void **)pkts_burst,
- MAX_PKT_BURST);
+ MAX_PKT_BURST, NULL);
SET_CPU_IDLE(tx_conf, CPU_POLL);
if (nb_rx > 0) {
*/
SET_CPU_BUSY(tx_conf, CPU_POLL);
nb_rx = rte_ring_sc_dequeue_burst(tx_conf->ring,
- (void **)pkts_burst, MAX_PKT_BURST);
+ (void **)pkts_burst, MAX_PKT_BURST, NULL);
SET_CPU_IDLE(tx_conf, CPU_POLL);
if (unlikely(nb_rx == 0)) {
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
+ burst_conf.qos_dequeue, NULL);
if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
}
/* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
if (unlikely(nb_dq_pkts < 0))
continue;
/* Dequeue packets from tx and send them */
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
- (void *) tx_pkts, *quota);
+ (void *) tx_pkts, *quota, NULL);
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) == 0))
+ rx_pkts, NULL) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
- cached_free_slots->objs, LCORE_CACHE_SIZE);
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE, NULL);
if (n_slots == 0)
return -ENOSPC;
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_mc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_sc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
static unsigned
/* If "pkts" buffer is empty, read packet burst from ring */
if (p->n_pkts == 0) {
p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX,
+ NULL);
RTE_PORT_RING_READER_FRAG_STATS_PKTS_IN_ADD(p, p->n_pkts);
if (p->n_pkts == 0)
return n_pkts_out;
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;
- nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);
return nb_rx;
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;
- nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);
return nb_rx;
static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
unsigned int i;
uint32_t mask = r->mask;
- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
entries = (prod_tail - cons_head);
/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;
cons_next = cons_head + n;
success = rte_atomic32_cmpset(&r->cons.head, cons_head,
rte_pause();
r->cons.tail = cons_next;
-
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
* and size(ring)-1. */
entries = prod_tail - cons_head;
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(entries == 0))
+ goto end;
cons_next = cons_head + n;
r->cons.head = cons_next;
rte_smp_rmb();
r->cons.tail = cons_next;
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}
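(Worked example of the out-parameter, for illustration: with 7 entries
present, a FIXED (bulk) dequeue of 8 returns 0 and sets *available to 7,
while a VARIABLE (burst) dequeue of 8 returns 7 and sets *available to 0.)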
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}
/**
* @param n
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}
/**
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
if (r->cons.single)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
}
/**
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}
/**
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}
/**
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}
/**
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}
/**
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}
/**
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
if (r->cons.single)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
}
#ifdef __cplusplus
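As a further hedged sketch, the ``available`` out-parameter lets a consumer
drain a ring to empty without a separate rte_ring_count() call on each pass;
``drain_ring`` and ``handle`` below are illustrative names, not part of the
API::

    #include <rte_ring.h>

    #define BURST 32

    /* Illustrative helper: dequeue in bursts until the ring reports no
     * entries remaining. */
    static unsigned int
    drain_ring(struct rte_ring *r, void (*handle)(void *))
    {
        void *objs[BURST];
        unsigned int i, n, remaining, total = 0;

        do {
            n = rte_ring_dequeue_burst(r, objs, BURST, &remaining);
            for (i = 0; i < n; i++)
                handle(objs[i]);
            total += n;
        } while (n > 0 && remaining > 0);

        return total;
    }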
ret = rte_ring_sc_dequeue_bulk(
app.rings_rx[i],
(void **) worker_mbuf->array,
- app.burst_size_worker_read);
+ app.burst_size_worker_read,
+ NULL);
if (ret == 0)
continue;
ret = rte_ring_sc_dequeue_bulk(
app.rings_tx[i],
(void **) &app.mbuf_tx[i].array[n_mbufs],
- app.burst_size_tx_read);
+ app.burst_size_tx_read,
+ NULL);
if (ret == 0)
continue;
static int
slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, size);
+ return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf,
+ size, NULL);
}
/*
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();
const uint64_t eth_start = rte_rdtsc();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
rte_ring_enqueue_bulk(r, &burst, 1, NULL);
- rte_ring_dequeue_bulk(r, &burst, 1);
+ rte_ring_dequeue_bulk(r, &burst, 1, NULL);
}
const uint64_t sc_end = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, (void *)burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();
__func__, i, rand);
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
NULL) != 0);
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand,
+ NULL) == rand);
/* fill the ring */
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
TEST_RING_VERIFY(0 == rte_ring_empty(r));
/* empty the ring */
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz,
+ NULL) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
goto fail;
printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;
printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;
printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
goto fail;
printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;
printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;
printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;
printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}
/* Available memory space for the exact MAX_BULK entries */
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
goto fail;
printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;
printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}
/* Available objects - the exact MAX_BULK */
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
- ret = rte_ring_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret != 2)
goto fail;
goto fail_test;
}
- ret = rte_ring_dequeue_burst(rp, obj, 2);
+ ret = rte_ring_dequeue_burst(rp, obj, 2, NULL);
if (ret != 2) {
printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
goto fail_test;
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t mc_end = rte_rdtsc();
printf("SC empty dequeue: %.2F\n",
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;
- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0) {
printf("Got no objects from ring %d - error code %d\n",
i, ret);
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;
- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0)
printf("Got no objects from ring %d - error code %d\n",
i, ret);
rte_port_ring_writer_ops.f_flush(port);
expected_pkts = 1;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -7;
expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -8;
expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -8;
expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -9;
dev_private = vrtl_eth_dev->data->dev_private;
rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs,
- nb_pkts);
+ nb_pkts, NULL);
/* increments ipackets count */
dev_private->eth_stats.ipackets += rx_count;
dev_private = vrtl_eth_dev->data->dev_private;
return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}
static uint8_t