L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}
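+/* Forward all packets carried in an event vector: map each source port to
+ * its destination port, optionally rewrite the Ethernet addresses and set
+ * the Tx queue, and account Rx/Tx statistics when stats are enabled.
+ */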
+static __rte_always_inline void
+l2fwd_event_vector_fwd(struct l2fwd_resources *rsrc,
+ struct rte_event_vector *vec,
+ const uint64_t timer_period, const uint32_t flags)
+{
+ struct rte_mbuf **mbufs = vec->mbufs;
+ uint16_t i, j;
+
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[0], void *));
+
+ /* If the vector attribute is valid, all mbufs are from the same port/queue */
+ if (vec->attr_valid) {
+ vec->port = rsrc->dst_ports[mbufs[0]->port];
+ if (flags & L2FWD_EVENT_TX_DIRECT)
+ vec->queue = 0;
+
+ if (timer_period > 0)
+ __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, __ATOMIC_RELAXED);
+
+ for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
+ if (j < vec->nb_elem)
+ rte_prefetch0(
+ rte_pktmbuf_mtod(mbufs[j], void *));
+
+ if (flags & L2FWD_EVENT_UPDT_MAC)
+ l2fwd_mac_updating(
+ mbufs[i], vec->port,
+ &rsrc->eth_addr[vec->port]);
+ }
+
+ if (timer_period > 0)
+ __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, __ATOMIC_RELAXED);
+ } else {
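+ /* Vector attribute not valid: mbufs may come from different
+ * ports/queues, so resolve the destination for each mbuf.
+ */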
+ for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
+ if (timer_period > 0)
+ __atomic_fetch_add(
+ &rsrc->port_stats[mbufs[i]->port].rx, 1,
+ __ATOMIC_RELAXED);
+
+ if (j < vec->nb_elem)
+ rte_prefetch0(
+ rte_pktmbuf_mtod(mbufs[j], void *));
+
+ mbufs[i]->port = rsrc->dst_ports[mbufs[i]->port];
+
+ if (flags & L2FWD_EVENT_UPDT_MAC)
+ l2fwd_mac_updating(
+ mbufs[i], mbufs[i]->port,
+ &rsrc->eth_addr[mbufs[i]->port]);
+
+ if (flags & L2FWD_EVENT_TX_DIRECT)
+ rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
+
+ if (timer_period > 0)
+ __atomic_fetch_add(
+ &rsrc->port_stats[mbufs[i]->port].tx, 1,
+ __ATOMIC_RELAXED);
+ }
+ }
+}
+
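+/* Vector event loop: dequeue bursts of vector events, forward each mbuf
+ * vector, then either forward the events to the Tx adapter's event queue
+ * (L2FWD_EVENT_TX_ENQ) or enqueue them directly through the Tx adapter
+ * (L2FWD_EVENT_TX_DIRECT).
+ */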
+static __rte_always_inline void
+l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ const int port_id = l2fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id =
+ evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
+ const uint64_t timer_period = rsrc->timer_period;
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint8_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event ev[MAX_PKT_BURST];
+ uint16_t nb_rx, nb_tx;
+ uint8_t i;
+
+ if (port_id < 0)
+ return;
+
+ printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
+ rte_lcore_id());
+
+ while (!rsrc->force_quit) {
+ nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
+ deq_len, 0);
+ if (nb_rx == 0)
+ continue;
+
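+ /* When Tx is done through an event queue, retarget each event to
+ * the Tx adapter's queue before forwarding its mbuf vector.
+ */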
+ for (i = 0; i < nb_rx; i++) {
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ ev[i].queue_id = tx_q_id;
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ }
+
+ l2fwd_event_vector_fwd(rsrc, ev[i].vec, timer_period,
+ flags);
+ }
+
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ nb_tx = rte_event_enqueue_burst(event_d_id, port_id, ev,
+ nb_rx);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_enqueue_burst(
+ event_d_id, port_id, ev + nb_tx,
+ nb_rx - nb_tx);
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT) {
+ nb_tx = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id, ev, nb_rx, 0);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id, ev + nb_tx,
+ nb_rx - nb_tx, 0);
+ }
+ }
+}
+
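+/* Per-configuration entry points: the constant flags let the compiler
+ * generate a specialized copy of l2fwd_event_loop_vector() for each
+ * combination of Tx mode and MAC updating.
+ */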
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_brst_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_brst_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_mac_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc,
+ L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_brst_mac_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc,
+ L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_mac_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc,
+ L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_brst_mac_vec(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop_vector(rsrc,
+ L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
+}
+
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
- /* [MAC_UPDT][TX_MODE][BURST] */
+ /* [VECTOR][MAC_UPDT][TX_MODE][BURST] */
- const event_loop_cb event_loop[2][2][2] = {
- [0][0][0] = l2fwd_event_main_loop_tx_d,
- [0][0][1] = l2fwd_event_main_loop_tx_d_brst,
- [0][1][0] = l2fwd_event_main_loop_tx_q,
- [0][1][1] = l2fwd_event_main_loop_tx_q_brst,
- [1][0][0] = l2fwd_event_main_loop_tx_d_mac,
- [1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
- [1][1][0] = l2fwd_event_main_loop_tx_q_mac,
- [1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
+ const event_loop_cb event_loop[2][2][2][2] = {
+ [0][0][0][0] = l2fwd_event_main_loop_tx_d,
+ [0][0][0][1] = l2fwd_event_main_loop_tx_d_brst,
+ [0][0][1][0] = l2fwd_event_main_loop_tx_q,
+ [0][0][1][1] = l2fwd_event_main_loop_tx_q_brst,
+ [0][1][0][0] = l2fwd_event_main_loop_tx_d_mac,
+ [0][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
+ [0][1][1][0] = l2fwd_event_main_loop_tx_q_mac,
+ [0][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
+ [1][0][0][0] = l2fwd_event_main_loop_tx_d_vec,
+ [1][0][0][1] = l2fwd_event_main_loop_tx_d_brst_vec,
+ [1][0][1][0] = l2fwd_event_main_loop_tx_q_vec,
+ [1][0][1][1] = l2fwd_event_main_loop_tx_q_brst_vec,
+ [1][1][0][0] = l2fwd_event_main_loop_tx_d_mac_vec,
+ [1][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac_vec,
+ [1][1][1][0] = l2fwd_event_main_loop_tx_q_mac_vec,
+ [1][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac_vec,
};
struct l2fwd_event_resources *evt_rsrc;
uint32_t event_queue_cfg;
if (ret < 0)
rte_panic("Error in starting eventdev\n");
- evt_rsrc->ops.l2fwd_event_loop = event_loop
- [rsrc->mac_updating]
- [evt_rsrc->tx_mode_q]
- [evt_rsrc->has_burst];
+ evt_rsrc->ops.l2fwd_event_loop =
+ event_loop[rsrc->evt_vec.enabled][rsrc->mac_updating]
+ [evt_rsrc->tx_mode_q][evt_rsrc->has_burst];
}
" --eventq-sched: Event queue schedule type, ordered, atomic or parallel.\n"
" Default: atomic\n"
" Valid only if --mode=eventdev\n"
+ " --event-vector: Enable event vectorization.\n"
+ " --event-vector-size: Max vector size if event vectorization is enabled.\n"
+ " --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
" --config: Configure forwarding port pair mapping\n"
" Default: alternate port pairs\n\n",
prgname);
#define CMD_LINE_OPT_MODE "mode"
#define CMD_LINE_OPT_EVENTQ_SCHED "eventq-sched"
#define CMD_LINE_OPT_PORT_PAIR_CONF "config"
+#define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
+#define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
+#define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
enum {
/* long options mapped to a short option */
CMD_LINE_OPT_MODE_NUM,
CMD_LINE_OPT_EVENTQ_SCHED_NUM,
CMD_LINE_OPT_PORT_PAIR_CONF_NUM,
+ CMD_LINE_OPT_ENABLE_VECTOR_NUM,
+ CMD_LINE_OPT_VECTOR_SIZE_NUM,
+ CMD_LINE_OPT_VECTOR_TMO_NS_NUM
};
/* Parse the argument given in the command line of the application */
CMD_LINE_OPT_EVENTQ_SCHED_NUM},
{ CMD_LINE_OPT_PORT_PAIR_CONF, required_argument, NULL,
CMD_LINE_OPT_PORT_PAIR_CONF_NUM},
+ {CMD_LINE_OPT_ENABLE_VECTOR, no_argument, NULL,
+ CMD_LINE_OPT_ENABLE_VECTOR_NUM},
+ {CMD_LINE_OPT_VECTOR_SIZE, required_argument, NULL,
+ CMD_LINE_OPT_VECTOR_SIZE_NUM},
+ {CMD_LINE_OPT_VECTOR_TMO_NS, required_argument, NULL,
+ CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
{NULL, 0, 0, 0}
};
int opt, ret, timer_secs;
return -1;
}
break;
+ case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
+ printf("event vectorization is enabled\n");
+ rsrc->evt_vec.enabled = 1;
+ break;
+ case CMD_LINE_OPT_VECTOR_SIZE_NUM:
+ rsrc->evt_vec.size = strtol(optarg, NULL, 10);
+ break;
+ case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
+ rsrc->evt_vec.timeout_ns = strtoull(optarg, NULL, 10);
+ break;
/* long options */
case 0:
rsrc->mac_updating = mac_updating;
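+ /* Apply defaults when vectorization is enabled but the vector size
+ * or timeout was not given on the command line.
+ */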
+ if (rsrc->evt_vec.enabled && !rsrc->evt_vec.size) {
+ rsrc->evt_vec.size = VECTOR_SIZE_DEFAULT;
+ printf("vector size set to default (%" PRIu16 ")\n",
+ rsrc->evt_vec.size);
+ }
+
+ if (rsrc->evt_vec.enabled && !rsrc->evt_vec.timeout_ns) {
+ rsrc->evt_vec.timeout_ns = VECTOR_TMO_NS_DEFAULT;
+ printf("vector timeout set to default (%" PRIu64 " ns)\n",
+ rsrc->evt_vec.timeout_ns);
+ }
+
if (optind >= 0)
argv[optind-1] = prgname;
rte_panic("Cannot init mbuf pool\n");
/* >8 End of creation of mbuf pool. */
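+ /* When vectorization is enabled, create a mempool to hold the
+ * rte_event_vector objects referencing the mbufs.
+ */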
+ if (rsrc->evt_vec.enabled) {
+ unsigned int nb_vec, vec_size;
+
+ vec_size = rsrc->evt_vec.size;
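+ /* Round up so fully filled vectors can cover every mbuf in the pool. */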
+ nb_vec = (nb_mbufs + vec_size - 1) / vec_size;
+ rsrc->evt_vec_pool = rte_event_vector_pool_create(
+ "vector_pool", nb_vec, 0, vec_size, rte_socket_id());
+ if (rsrc->evt_vec_pool == NULL)
+ rte_panic("Cannot init event vector pool\n");
+ }
+
nb_ports_available = l2fwd_event_init_ports(rsrc);
if (!nb_ports_available)
rte_panic("All available ports are disabled. Please set portmask.\n");