};
static void *
-rte_port_ring_reader_create(void *params, int socket_id)
+rte_port_ring_reader_create_internal(void *params, int socket_id,
+ uint32_t is_multi)
{
struct rte_port_ring_reader_params *conf =
(struct rte_port_ring_reader_params *) params;
struct rte_port_ring_reader *port;
/* Check input parameters */
- if (conf == NULL) {
- RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
+ if ((conf == NULL) ||
+ (conf->ring == NULL) ||
+ (conf->ring->cons.sc_dequeue && is_multi) ||
+ (!(conf->ring->cons.sc_dequeue) && !is_multi)) {
+ RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
}
return port;
}
+/* Public op: create a single-consumer ring reader (is_multi = 0). */
+static void *
+rte_port_ring_reader_create(void *params, int socket_id)
+{
+ return rte_port_ring_reader_create_internal(params, socket_id, 0);
+}
+
+/* Public op: create a multi-consumer ring reader (is_multi = 1). */
+static void *
+rte_port_ring_multi_reader_create(void *params, int socket_id)
+{
+ return rte_port_ring_reader_create_internal(params, socket_id, 1);
+}
+
static int
rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
return nb_rx;
}
+/*
+ * RX handler for the multi-consumer reader: dequeue up to n_pkts mbufs
+ * into pkts[] using the MC-safe burst dequeue and bump the in-stats.
+ * Returns the number of mbufs actually dequeued.
+ */
+static int
+rte_port_ring_multi_reader_rx(void *port, struct rte_mbuf **pkts,
+ uint32_t n_pkts)
+{
+ struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
+ uint32_t nb_rx;
+
+ /* mc variant: safe when several threads poll the same ring */
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);
+
+ return nb_rx;
+}
+
static int
rte_port_ring_reader_free(void *port)
{
uint32_t tx_burst_sz;
uint32_t tx_buf_count;
uint64_t bsz_mask;
+ uint32_t is_multi;
};
static void *
-rte_port_ring_writer_create(void *params, int socket_id)
+rte_port_ring_writer_create_internal(void *params, int socket_id,
+ uint32_t is_multi)
{
struct rte_port_ring_writer_params *conf =
(struct rte_port_ring_writer_params *) params;
/* Check input parameters */
if ((conf == NULL) ||
- (conf->ring == NULL) ||
+ (conf->ring == NULL) ||
+ (conf->ring->prod.sp_enqueue && is_multi) ||
+ (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
port->tx_burst_sz = conf->tx_burst_sz;
port->tx_buf_count = 0;
port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+ port->is_multi = is_multi;
return port;
}
+/* Public op: create a single-producer ring writer (is_multi = 0). */
+static void *
+rte_port_ring_writer_create(void *params, int socket_id)
+{
+ return rte_port_ring_writer_create_internal(params, socket_id, 0);
+}
+
+/* Public op: create a multi-producer ring writer (is_multi = 1). */
+static void *
+rte_port_ring_multi_writer_create(void *params, int socket_id)
+{
+ return rte_port_ring_writer_create_internal(params, socket_id, 1);
+}
+
static inline void
send_burst(struct rte_port_ring_writer *p)
{
p->tx_buf_count = 0;
}
+/*
+ * Flush the writer's staging buffer with a multi-producer enqueue.
+ * Mbufs the ring refuses are counted as drops and freed; the buffer
+ * count is always reset so the next tx starts from an empty buffer.
+ */
+static inline void
+send_burst_mp(struct rte_port_ring_writer *p)
+{
+ uint32_t nb_tx;
+
+ nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
+ p->tx_buf_count);
+
+ RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
+ /* free whatever did not fit in the ring */
+ for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
static int
rte_port_ring_writer_tx(void *port, struct rte_mbuf *pkt)
{
}
static int
-rte_port_ring_writer_tx_bulk(void *port,
+rte_port_ring_multi_writer_tx(void *port, struct rte_mbuf *pkt)
+{
+ /* Buffer one mbuf; flush with an MP enqueue once a burst is queued. */
+ struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, 1);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst_mp(p);
+
+ return 0;
+}
+
+static inline int __attribute__((always_inline))
+rte_port_ring_writer_tx_bulk_internal(void *port,
struct rte_mbuf **pkts,
- uint64_t pkts_mask)
+ uint64_t pkts_mask,
+ uint32_t is_multi)
{
struct rte_port_ring_writer *p =
(struct rte_port_ring_writer *) port;
uint64_t n_pkts = __builtin_popcountll(pkts_mask);
uint32_t n_pkts_ok;
- if (tx_buf_count)
- send_burst(p);
+ if (tx_buf_count) {
+ if (is_multi)
+ send_burst_mp(p);
+ else
+ send_burst(p);
+ }
RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ if (is_multi)
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
+ n_pkts);
+ else
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
+ n_pkts);
RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
}
p->tx_buf_count = tx_buf_count;
- if (tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
+ if (tx_buf_count >= p->tx_burst_sz) {
+ if (is_multi)
+ send_burst_mp(p);
+ else
+ send_burst(p);
+ }
}
return 0;
}
+/* Public op: bulk TX for the single-producer writer (is_multi = 0). */
+static int
+rte_port_ring_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ return rte_port_ring_writer_tx_bulk_internal(port, pkts, pkts_mask, 0);
+}
+
+/* Public op: bulk TX for the multi-producer writer (is_multi = 1). */
+static int
+rte_port_ring_multi_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ return rte_port_ring_writer_tx_bulk_internal(port, pkts, pkts_mask, 1);
+}
+
static int
rte_port_ring_writer_flush(void *port)
{
return 0;
}
+/* Flush any buffered mbufs using the multi-producer burst path. */
+static int
+rte_port_ring_multi_writer_flush(void *port)
+{
+ struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst_mp(p);
+
+ return 0;
+}
+
static int
rte_port_ring_writer_free(void *port)
{
+ struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+
if (port == NULL) {
RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
return -EINVAL;
}
- rte_port_ring_writer_flush(port);
+ if (p->is_multi)
+ rte_port_ring_multi_writer_flush(port);
+ else
+ rte_port_ring_writer_flush(port);
+
rte_free(port);
return 0;
uint32_t tx_buf_count;
uint64_t bsz_mask;
uint64_t n_retries;
+ uint32_t is_multi;
};
static void *
-rte_port_ring_writer_nodrop_create(void *params, int socket_id)
+rte_port_ring_writer_nodrop_create_internal(void *params, int socket_id,
+ uint32_t is_multi)
{
struct rte_port_ring_writer_nodrop_params *conf =
(struct rte_port_ring_writer_nodrop_params *) params;
/* Check input parameters */
if ((conf == NULL) ||
- (conf->ring == NULL) ||
+ (conf->ring == NULL) ||
+ (conf->ring->prod.sp_enqueue && is_multi) ||
+ (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
port->tx_burst_sz = conf->tx_burst_sz;
port->tx_buf_count = 0;
port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+ port->is_multi = is_multi;
/*
* When n_retries is 0 it means that we should wait for every packet to
return port;
}
+/* Public op: create a single-producer nodrop writer (is_multi = 0). */
+static void *
+rte_port_ring_writer_nodrop_create(void *params, int socket_id)
+{
+ return rte_port_ring_writer_nodrop_create_internal(params, socket_id, 0);
+}
+
+/* Public op: create a multi-producer nodrop writer (is_multi = 1). */
+static void *
+rte_port_ring_multi_writer_nodrop_create(void *params, int socket_id)
+{
+ return rte_port_ring_writer_nodrop_create_internal(params, socket_id, 1);
+}
+
static inline void
send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
{
p->tx_buf_count = 0;
}
+/*
+ * Multi-producer variant of send_burst_nodrop: drain p->tx_buf into the
+ * ring with rte_ring_mp_enqueue_burst, retrying up to p->n_retries times
+ * before counting the leftovers as drops and freeing them.
+ *
+ * tx_buf_count must be reset on EVERY exit path, including the early
+ * "all sent" returns; otherwise the next tx call appends at the stale
+ * count, re-enqueuing mbufs already owned by the ring and eventually
+ * writing past RTE_PORT_IN_BURST_SIZE_MAX entries of tx_buf.
+ */
+static inline void
+send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
+{
+ uint32_t nb_tx = 0, i;
+
+ nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
+ p->tx_buf_count);
+
+ /* We sent all the packets in a first try */
+ if (nb_tx >= p->tx_buf_count) {
+ p->tx_buf_count = 0;
+ return;
+ }
+
+ for (i = 0; i < p->n_retries; i++) {
+ nb_tx += rte_ring_mp_enqueue_burst(p->ring,
+ (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+
+ /* We sent all the packets in more than one try */
+ if (nb_tx >= p->tx_buf_count) {
+ p->tx_buf_count = 0;
+ return;
+ }
+ }
+
+ /* We didn't send the packets in maximum allowed attempts */
+ RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_DROP_ADD(p,
+ p->tx_buf_count - nb_tx);
+ for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
static int
rte_port_ring_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
}
static int
-rte_port_ring_writer_nodrop_tx_bulk(void *port,
+rte_port_ring_multi_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
+{
+ /* Buffer one mbuf; flush via the MP nodrop path at a full burst. */
+ struct rte_port_ring_writer_nodrop *p =
+ (struct rte_port_ring_writer_nodrop *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst_mp_nodrop(p);
+
+ return 0;
+}
+
+static inline int __attribute__((always_inline))
+rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
struct rte_mbuf **pkts,
- uint64_t pkts_mask)
+ uint64_t pkts_mask,
+ uint32_t is_multi)
{
struct rte_port_ring_writer_nodrop *p =
(struct rte_port_ring_writer_nodrop *) port;
uint64_t n_pkts = __builtin_popcountll(pkts_mask);
uint32_t n_pkts_ok;
- if (tx_buf_count)
- send_burst_nodrop(p);
+ if (tx_buf_count) {
+ if (is_multi)
+ send_burst_mp_nodrop(p);
+ else
+ send_burst_nodrop(p);
+ }
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ if (is_multi)
+ n_pkts_ok =
+ rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ else
+ n_pkts_ok =
+ rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
if (n_pkts_ok >= n_pkts)
return 0;
/*
- * If we didnt manage to send all packets in single burst, move
+ * If we didn't manage to send all packets in single burst, move
* remaining packets to the buffer and call send burst.
*/
for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
struct rte_mbuf *pkt = pkts[n_pkts_ok];
+
p->tx_buf[p->tx_buf_count++] = pkt;
}
- send_burst_nodrop(p);
+ if (is_multi)
+ send_burst_mp_nodrop(p);
+ else
+ send_burst_nodrop(p);
} else {
for ( ; pkts_mask; ) {
uint32_t pkt_index = __builtin_ctzll(pkts_mask);
}
p->tx_buf_count = tx_buf_count;
- if (tx_buf_count >= p->tx_burst_sz)
- send_burst_nodrop(p);
+ if (tx_buf_count >= p->tx_burst_sz) {
+ if (is_multi)
+ send_burst_mp_nodrop(p);
+ else
+ send_burst_nodrop(p);
+ }
}
return 0;
}
+/* Public op: bulk TX for the single-producer nodrop writer. */
+static int
+rte_port_ring_writer_nodrop_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ return
+ rte_port_ring_writer_nodrop_tx_bulk_internal(port, pkts, pkts_mask, 0);
+}
+
+/* Public op: bulk TX for the multi-producer nodrop writer. */
+static int
+rte_port_ring_multi_writer_nodrop_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ return
+ rte_port_ring_writer_nodrop_tx_bulk_internal(port, pkts, pkts_mask, 1);
+}
+
static int
rte_port_ring_writer_nodrop_flush(void *port)
{
return 0;
}
+/* Flush buffered mbufs using the multi-producer nodrop burst path. */
+static int
+rte_port_ring_multi_writer_nodrop_flush(void *port)
+{
+ struct rte_port_ring_writer_nodrop *p =
+ (struct rte_port_ring_writer_nodrop *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst_mp_nodrop(p);
+
+ return 0;
+}
+
static int
rte_port_ring_writer_nodrop_free(void *port)
{
+ struct rte_port_ring_writer_nodrop *p =
+ (struct rte_port_ring_writer_nodrop *) port;
+
if (port == NULL) {
RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
return -EINVAL;
}
- rte_port_ring_writer_nodrop_flush(port);
+ if (p->is_multi)
+ rte_port_ring_multi_writer_nodrop_flush(port);
+ else
+ rte_port_ring_writer_nodrop_flush(port);
+
rte_free(port);
return 0;
.f_flush = rte_port_ring_writer_nodrop_flush,
.f_stats = rte_port_ring_writer_nodrop_stats_read,
};
+
+/* Input-port ops table for the multi-consumer ring reader; free and
+ * stats callbacks are shared with the single-consumer reader. */
+struct rte_port_in_ops rte_port_ring_multi_reader_ops = {
+ .f_create = rte_port_ring_multi_reader_create,
+ .f_free = rte_port_ring_reader_free,
+ .f_rx = rte_port_ring_multi_reader_rx,
+ .f_stats = rte_port_ring_reader_stats_read,
+};
+
+/* Output-port ops table for the multi-producer ring writer; f_free is
+ * shared (it dispatches on port->is_multi to pick the right flush). */
+struct rte_port_out_ops rte_port_ring_multi_writer_ops = {
+ .f_create = rte_port_ring_multi_writer_create,
+ .f_free = rte_port_ring_writer_free,
+ .f_tx = rte_port_ring_multi_writer_tx,
+ .f_tx_bulk = rte_port_ring_multi_writer_tx_bulk,
+ .f_flush = rte_port_ring_multi_writer_flush,
+ .f_stats = rte_port_ring_writer_stats_read,
+};
+
+/* Output-port ops table for the multi-producer nodrop writer; f_free is
+ * shared with the single-producer nodrop writer. */
+struct rte_port_out_ops rte_port_ring_multi_writer_nodrop_ops = {
+ .f_create = rte_port_ring_multi_writer_nodrop_create,
+ .f_free = rte_port_ring_writer_nodrop_free,
+ .f_tx = rte_port_ring_multi_writer_nodrop_tx,
+ .f_tx_bulk = rte_port_ring_multi_writer_nodrop_tx_bulk,
+ .f_flush = rte_port_ring_multi_writer_nodrop_flush,
+ .f_stats = rte_port_ring_writer_nodrop_stats_read,
+};