[--mode]
[--eventq-sched]
[--event-eth-rxqs]
+ [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]
[-E]
[-L]
* ``--event-eth-rxqs:`` Optional, number of Ethernet RX queues per device. Only valid if --mode=eventdev.
+* ``--event-vector:`` Optional, enable event vectorization. Only valid if --mode=eventdev.
+
+* ``--event-vector-size:`` Optional, maximum vector size when event vectorization is enabled.
+
+* ``--event-vector-tmo:`` Optional, maximum timeout in nanoseconds to form a vector when event vectorization is enabled (see the example invocation below).
+
* ``-E:`` Optional, enable exact match. This is a legacy flag;
  please use ``--lookup=em`` instead.
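+
+For example, a sketch of an invocation enabling eventdev vectorization with a
+vector size of 16 and a 1 ms formation timeout (the core list, port mask and
+scheduling mode below are illustrative):
+
+.. code-block:: console
+
+    ./<build_dir>/examples/dpdk-l3fwd -l 0-3 -n 4 -- -p 0x3 --mode=eventdev \
+            --eventq-sched=ordered --event-vector --event-vector-size 16 \
+            --event-vector-tmo 1000000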
#define MEMPOOL_CACHE_SIZE 256
#define MAX_RX_QUEUE_PER_LCORE 16
+#define VECTOR_SIZE_DEFAULT MAX_PKT_BURST
+#define VECTOR_TMO_NS_DEFAULT 1E6 /* 1ms */
/*
* Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
*/
int
lpm_event_main_loop_tx_q(__rte_unused void *dummy);
int
lpm_event_main_loop_tx_q_burst(__rte_unused void *dummy);
+int
+lpm_event_main_loop_tx_d_vector(__rte_unused void *dummy);
+int
+lpm_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy);
+int
+lpm_event_main_loop_tx_q_vector(__rte_unused void *dummy);
+int
+lpm_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy);
int
em_event_main_loop_tx_d(__rte_unused void *dummy);
int
em_event_main_loop_tx_q(__rte_unused void *dummy);
int
em_event_main_loop_tx_q_burst(__rte_unused void *dummy);
+int
+em_event_main_loop_tx_d_vector(__rte_unused void *dummy);
+int
+em_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy);
+int
+em_event_main_loop_tx_q_vector(__rte_unused void *dummy);
+int
+em_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy);
int
fib_event_main_loop_tx_d(__rte_unused void *dummy);
int
fib_event_main_loop_tx_q(__rte_unused void *dummy);
int
fib_event_main_loop_tx_q_burst(__rte_unused void *dummy);
+int
+fib_event_main_loop_tx_d_vector(__rte_unused void *dummy);
+int
+fib_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy);
+int
+fib_event_main_loop_tx_q_vector(__rte_unused void *dummy);
+int
+fib_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy);
/* Return ipv4/ipv6 fwd lookup struct for LPM, EM or FIB. */
return 0;
}
+/* The same eventdev loop handles both single and burst dequeue of vectors */
+static __rte_always_inline void
+em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id =
+ evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint16_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event events[MAX_PKT_BURST];
+ struct lcore_conf *lconf;
+ unsigned int lcore_id;
+ int i, nb_enq, nb_deq;
+
+ if (event_p_id < 0)
+ return;
+
+ lcore_id = rte_lcore_id();
+ lconf = &lcore_conf[lcore_id];
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
+
+ while (!force_quit) {
+ /* Read events from RX queues */
+ nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
+ deq_len, 0);
+ if (nb_deq == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_deq; i++) {
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ events[i].queue_id = tx_q_id;
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ }
+
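+			/* Use the optimized EM vector path on x86/NEON builds,
+			 * the generic scalar path otherwise.
+			 */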
+#if defined RTE_ARCH_X86 || defined __ARM_NEON
+ l3fwd_em_process_event_vector(events[i].vec, lconf);
+#else
+ l3fwd_em_no_opt_process_event_vector(events[i].vec,
+ lconf);
+#endif
+ if (flags & L3FWD_EVENT_TX_DIRECT)
+ event_vector_txq_set(events[i].vec, 0);
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
+ events, nb_deq);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_enqueue_burst(
+ event_d_id, event_p_id, events + nb_enq,
+ nb_deq - nb_enq);
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ nb_enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, events, nb_deq, 0);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, events + nb_enq,
+ nb_deq - nb_enq, 0);
+ }
+ }
+}
+
+int __rte_noinline
+em_event_main_loop_tx_d_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
+ return 0;
+}
+
+int __rte_noinline
+em_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
+ return 0;
+}
+
+int __rte_noinline
+em_event_main_loop_tx_q_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
+ return 0;
+}
+
+int __rte_noinline
+em_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
+ return 0;
+}
+
/* Initialize exact match (hash) parameters. 8< */
void
setup_hash(const int socketid)
l3fwd_em_simple_process(events[j]->mbuf, qconf);
}
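+
+/* Process an event vector without the multi-hash-lookup optimization. */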
+static inline void
+l3fwd_em_no_opt_process_event_vector(struct rte_event_vector *vec,
+ struct lcore_conf *qconf)
+{
+ struct rte_mbuf **mbufs = vec->mbufs;
+ int32_t i;
+
+ /* Prefetch first packets */
+ for (i = 0; i < PREFETCH_OFFSET && i < vec->nb_elem; i++)
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[i], void *));
+
+ /* Process first packet to init vector attributes */
+ l3fwd_em_simple_process(mbufs[0], qconf);
+ if (vec->attr_valid) {
+ if (mbufs[0]->port != BAD_PORT)
+ vec->port = mbufs[0]->port;
+ else
+ vec->attr_valid = 0;
+ }
+
+	/*
+	 * Prefetch ahead and process the packets that have already been
+	 * prefetched.
+	 */
+ for (i = 1; i < (vec->nb_elem - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(
+ rte_pktmbuf_mtod(mbufs[i + PREFETCH_OFFSET], void *));
+ l3fwd_em_simple_process(mbufs[i], qconf);
+ event_vector_attr_validate(vec, mbufs[i]);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; i < vec->nb_elem; i++) {
+ l3fwd_em_simple_process(mbufs[i], qconf);
+ event_vector_attr_validate(vec, mbufs[i]);
+ }
+}
+
#endif /* __L3FWD_EM_H__ */
process_packet(pkts_burst[j], &pkts_burst[j]->port);
}
}
+
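+/* Process an event vector, looking up EM_HASH_LOOKUP_COUNT packets at a time. */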
+static inline void
+l3fwd_em_process_event_vector(struct rte_event_vector *vec,
+ struct lcore_conf *qconf)
+{
+ struct rte_mbuf **mbufs = vec->mbufs;
+ uint16_t dst_port[MAX_PKT_BURST];
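+	/* Per-packet lookup results; assumes vec->nb_elem <= MAX_PKT_BURST. */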
+ int32_t i, j, n, pos;
+
+ for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < vec->nb_elem; j++)
+ rte_prefetch0(
+ rte_pktmbuf_mtod(mbufs[j], struct rte_ether_hdr *) + 1);
+
+ if (vec->attr_valid)
+ vec->port = em_get_dst_port(qconf, mbufs[0], mbufs[0]->port);
+
+ n = RTE_ALIGN_FLOOR(vec->nb_elem, EM_HASH_LOOKUP_COUNT);
+ for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
+ uint32_t pkt_type =
+ RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
+ uint32_t l3_type, tcp_or_udp;
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
+ pkt_type &= mbufs[j + i]->packet_type;
+
+ l3_type = pkt_type & RTE_PTYPE_L3_MASK;
+ tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
+
+ for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;
+ i < EM_HASH_LOOKUP_COUNT && pos < vec->nb_elem;
+ i++, pos++) {
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[pos],
+ struct rte_ether_hdr *) +
+ 1);
+ }
+
+ if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {
+ em_get_dst_port_ipv4xN_events(qconf, &mbufs[j],
+ &dst_port[j]);
+ } else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {
+ em_get_dst_port_ipv6xN_events(qconf, &mbufs[j],
+ &dst_port[j]);
+ } else {
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ mbufs[j + i]->port =
+ em_get_dst_port(qconf, mbufs[j + i],
+ mbufs[j + i]->port);
+ process_packet(mbufs[j + i],
+ &mbufs[j + i]->port);
+ event_vector_attr_validate(vec, mbufs[j + i]);
+ }
+ continue;
+ }
+ processx4_step3(&mbufs[j], &dst_port[j]);
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ mbufs[j + i]->port = dst_port[j + i];
+ event_vector_attr_validate(vec, mbufs[j + i]);
+ }
+ }
+
+ for (; j < vec->nb_elem; j++) {
+ mbufs[j]->port =
+ em_get_dst_port(qconf, mbufs[j], mbufs[j]->port);
+ process_packet(mbufs[j], &mbufs[j]->port);
+ event_vector_attr_validate(vec, mbufs[j]);
+ }
+}
+
#endif /* __L3FWD_EM_HLM_H__ */
process_packet(mbuf, &mbuf->port);
}
}
+
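+/* Process an event vector one packet at a time, prefetching one ahead. */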
+static inline void
+l3fwd_em_process_event_vector(struct rte_event_vector *vec,
+ struct lcore_conf *qconf)
+{
+ struct rte_mbuf **mbufs = vec->mbufs;
+ int32_t i, j;
+
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[0], struct rte_ether_hdr *) + 1);
+
+ if (vec->attr_valid)
+ vec->port = em_get_dst_port(qconf, mbufs[0], mbufs[0]->port);
+
+ for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
+ if (j < vec->nb_elem)
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[j],
+ struct rte_ether_hdr *) +
+ 1);
+ mbufs[i]->port =
+ em_get_dst_port(qconf, mbufs[i], mbufs[i]->port);
+ process_packet(mbufs[i], &mbufs[i]->port);
+ event_vector_attr_validate(vec, mbufs[i]);
+ }
+}
+
#endif /* __L3FWD_EM_SEQUENTIAL_H__ */
l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
- const event_loop_cb lpm_event_loop[2][2] = {
- [0][0] = lpm_event_main_loop_tx_d,
- [0][1] = lpm_event_main_loop_tx_d_burst,
- [1][0] = lpm_event_main_loop_tx_q,
- [1][1] = lpm_event_main_loop_tx_q_burst,
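+	/* Callback tables indexed by [vector_enabled][tx_mode_q][has_burst];
+	 * the em and fib tables below use the same layout.
+	 */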
+ const event_loop_cb lpm_event_loop[2][2][2] = {
+ [0][0][0] = lpm_event_main_loop_tx_d,
+ [0][0][1] = lpm_event_main_loop_tx_d_burst,
+ [0][1][0] = lpm_event_main_loop_tx_q,
+ [0][1][1] = lpm_event_main_loop_tx_q_burst,
+ [1][0][0] = lpm_event_main_loop_tx_d_vector,
+ [1][0][1] = lpm_event_main_loop_tx_d_burst_vector,
+ [1][1][0] = lpm_event_main_loop_tx_q_vector,
+ [1][1][1] = lpm_event_main_loop_tx_q_burst_vector,
};
- const event_loop_cb em_event_loop[2][2] = {
- [0][0] = em_event_main_loop_tx_d,
- [0][1] = em_event_main_loop_tx_d_burst,
- [1][0] = em_event_main_loop_tx_q,
- [1][1] = em_event_main_loop_tx_q_burst,
+ const event_loop_cb em_event_loop[2][2][2] = {
+ [0][0][0] = em_event_main_loop_tx_d,
+ [0][0][1] = em_event_main_loop_tx_d_burst,
+ [0][1][0] = em_event_main_loop_tx_q,
+ [0][1][1] = em_event_main_loop_tx_q_burst,
+ [1][0][0] = em_event_main_loop_tx_d_vector,
+ [1][0][1] = em_event_main_loop_tx_d_burst_vector,
+ [1][1][0] = em_event_main_loop_tx_q_vector,
+ [1][1][1] = em_event_main_loop_tx_q_burst_vector,
};
- const event_loop_cb fib_event_loop[2][2] = {
- [0][0] = fib_event_main_loop_tx_d,
- [0][1] = fib_event_main_loop_tx_d_burst,
- [1][0] = fib_event_main_loop_tx_q,
- [1][1] = fib_event_main_loop_tx_q_burst,
+ const event_loop_cb fib_event_loop[2][2][2] = {
+ [0][0][0] = fib_event_main_loop_tx_d,
+ [0][0][1] = fib_event_main_loop_tx_d_burst,
+ [0][1][0] = fib_event_main_loop_tx_q,
+ [0][1][1] = fib_event_main_loop_tx_q_burst,
+ [1][0][0] = fib_event_main_loop_tx_d_vector,
+ [1][0][1] = fib_event_main_loop_tx_d_burst_vector,
+ [1][1][0] = fib_event_main_loop_tx_q_vector,
+ [1][1][1] = fib_event_main_loop_tx_q_burst_vector,
};
uint32_t event_queue_cfg;
int ret;
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error in starting eventdev");
- evt_rsrc->ops.lpm_event_loop = lpm_event_loop[evt_rsrc->tx_mode_q]
- [evt_rsrc->has_burst];
+ evt_rsrc->ops.lpm_event_loop =
+ lpm_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
- evt_rsrc->ops.em_event_loop = em_event_loop[evt_rsrc->tx_mode_q]
- [evt_rsrc->has_burst];
+ evt_rsrc->ops.em_event_loop =
+ em_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
- evt_rsrc->ops.fib_event_loop = fib_event_loop[evt_rsrc->tx_mode_q]
- [evt_rsrc->has_burst];
+ evt_rsrc->ops.fib_event_loop =
+ fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
}
uint8_t disable_implicit_release;
struct l3fwd_event_setup_ops ops;
struct rte_mempool * (*pkt_pool)[NB_SOCKETS];
+ struct rte_mempool **vec_pool;
struct l3fwd_event_queues evq;
struct l3fwd_event_ports evp;
uint32_t port_mask;
uint8_t has_burst;
uint8_t enabled;
uint8_t eth_rx_queues;
+ uint8_t vector_enabled;
+ uint16_t vector_size;
+ uint64_t vector_tmo_ns;
};
+static inline void
+event_vector_attr_validate(struct rte_event_vector *vec, struct rte_mbuf *mbuf)
+{
+	/* l3fwd only changes the mbuf port while processing; a port that
+	 * differs from the vector's invalidates the common attribute.
+	 */
+ if (vec->attr_valid && (vec->port != mbuf->port))
+ vec->attr_valid = 0;
+}
+
+static inline void
+event_vector_txq_set(struct rte_event_vector *vec, uint16_t txq)
+{
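+	/* A valid vector attribute means all mbufs share the same destination,
+	 * so one queue assignment covers the whole vector; otherwise the Tx
+	 * queue must be set on each mbuf individually.
+	 */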
+ if (vec->attr_valid) {
+ vec->queue = txq;
+ } else {
+ int i;
+
+ for (i = 0; i < vec->nb_elem; i++)
+ rte_event_eth_tx_adapter_txq_set(vec->mbufs[i], txq);
+ }
+}
+
struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
int l3fwd_get_free_event_port(struct l3fwd_event_resources *eventdev_rsrc);
rte_panic("Failed to allocate memory for Rx adapter\n");
}
-
RTE_ETH_FOREACH_DEV(port_id) {
if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
continue;
+
+ if (evt_rsrc->vector_enabled) {
+ uint32_t cap;
+
+ if (rte_event_eth_rx_adapter_caps_get(event_d_id,
+ port_id, &cap))
+				rte_panic(
+					"Failed to get event rx adapter capability\n");
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+ eth_q_conf.vector_sz = evt_rsrc->vector_size;
+ eth_q_conf.vector_timeout_ns =
+ evt_rsrc->vector_tmo_ns;
+ eth_q_conf.vector_mp =
+ evt_rsrc->per_port_pool ?
+ evt_rsrc->vec_pool[port_id] :
+ evt_rsrc->vec_pool[0];
+ eth_q_conf.rx_queue_flags |=
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+ } else {
+				rte_panic(
+					"Rx adapter doesn't support event vector\n");
+ }
+ }
+
ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
- &evt_rsrc->def_p_conf);
+ &evt_rsrc->def_p_conf);
if (ret)
rte_panic("Failed to create rx adapter[%d]\n",
adapter_id);
return 0;
}
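+
+/* Parse every packet in the vector, then resolve next hops with bulk
+ * FIB/FIB6 lookups and assign the resulting ports.
+ */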
+static __rte_always_inline void
+fib_process_event_vector(struct rte_event_vector *vec)
+{
+ uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
+ uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
+ uint32_t ipv4_arr_assem, ipv6_arr_assem;
+ struct rte_mbuf **mbufs = vec->mbufs;
+ uint32_t ipv4_arr[MAX_PKT_BURST];
+ uint8_t type_arr[MAX_PKT_BURST];
+ uint32_t ipv4_cnt, ipv6_cnt;
+ struct lcore_conf *lconf;
+ uint16_t nh;
+ int i;
+
+ lconf = &lcore_conf[rte_lcore_id()];
+
+	/* Initialize counters. */
+ ipv4_cnt = 0;
+ ipv6_cnt = 0;
+ ipv4_arr_assem = 0;
+ ipv6_arr_assem = 0;
+
+ /* Prefetch first packets. */
+ for (i = 0; i < FIB_PREFETCH_OFFSET && i < vec->nb_elem; i++)
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[i], void *));
+
+ /* Parse packet info and prefetch. */
+ for (i = 0; i < (vec->nb_elem - FIB_PREFETCH_OFFSET); i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(mbufs[i + FIB_PREFETCH_OFFSET],
+ void *));
+ fib_parse_packet(mbufs[i], &ipv4_arr[ipv4_cnt], &ipv4_cnt,
+ ipv6_arr[ipv6_cnt], &ipv6_cnt, &type_arr[i]);
+ }
+
+ /* Parse remaining packet info. */
+ for (; i < vec->nb_elem; i++)
+ fib_parse_packet(mbufs[i], &ipv4_arr[ipv4_cnt], &ipv4_cnt,
+ ipv6_arr[ipv6_cnt], &ipv6_cnt, &type_arr[i]);
+
+ /* Lookup IPv4 hops if IPv4 packets are present. */
+ if (likely(ipv4_cnt > 0))
+ rte_fib_lookup_bulk(lconf->ipv4_lookup_struct, ipv4_arr, hopsv4,
+ ipv4_cnt);
+
+ /* Lookup IPv6 hops if IPv6 packets are present. */
+ if (ipv6_cnt > 0)
+ rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct, ipv6_arr,
+ hopsv6, ipv6_cnt);
+
+ if (vec->attr_valid) {
+ nh = type_arr[0] ? (uint16_t)hopsv4[0] : (uint16_t)hopsv6[0];
+ if (nh != FIB_DEFAULT_HOP)
+ vec->port = nh;
+ else
+ vec->attr_valid = 0;
+ }
+
+	/* Assign the looked-up ports, taking the IPv4 or IPv6 hop per packet. */
+ for (i = 0; i < vec->nb_elem; i++) {
+ if (type_arr[i])
+ nh = (uint16_t)hopsv4[ipv4_arr_assem++];
+ else
+ nh = (uint16_t)hopsv6[ipv6_arr_assem++];
+ if (nh != FIB_DEFAULT_HOP)
+ mbufs[i]->port = nh;
+ event_vector_attr_validate(vec, mbufs[i]);
+ }
+}
+
+static __rte_always_inline void
+fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id =
+ evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint16_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event events[MAX_PKT_BURST];
+ int nb_enq, nb_deq, i;
+
+ if (event_p_id < 0)
+ return;
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__,
+ rte_lcore_id());
+
+ while (!force_quit) {
+ /* Read events from RX queues. */
+ nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
+ deq_len, 0);
+ if (nb_deq == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_deq; i++) {
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ events[i].queue_id = tx_q_id;
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ }
+
+ fib_process_event_vector(events[i].vec);
+
+ if (flags & L3FWD_EVENT_TX_DIRECT)
+ event_vector_txq_set(events[i].vec, 0);
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
+ events, nb_deq);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_enqueue_burst(
+ event_d_id, event_p_id, events + nb_enq,
+ nb_deq - nb_enq);
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ nb_enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, events, nb_deq, 0);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, events + nb_enq,
+ nb_deq - nb_enq, 0);
+ }
+ }
+}
+
+int __rte_noinline
+fib_event_main_loop_tx_d_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
+ return 0;
+}
+
+int __rte_noinline
+fib_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
+ return 0;
+}
+
+int __rte_noinline
+fib_event_main_loop_tx_q_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
+ return 0;
+}
+
+int __rte_noinline
+fib_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
+ return 0;
+}
+
/* Function to setup fib. 8< */
void
setup_fib(const int socketid)
return 0;
}
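+
+/* Run LPM processing on each packet in the vector, tracking whether all
+ * packets resolve to the same output port.
+ */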
+static __rte_always_inline void
+lpm_process_event_vector(struct rte_event_vector *vec, struct lcore_conf *lconf)
+{
+ struct rte_mbuf **mbufs = vec->mbufs;
+ int i;
+
+ /* Process first packet to init vector attributes */
+ lpm_process_event_pkt(lconf, mbufs[0]);
+ if (vec->attr_valid) {
+ if (mbufs[0]->port != BAD_PORT)
+ vec->port = mbufs[0]->port;
+ else
+ vec->attr_valid = 0;
+ }
+
+ for (i = 1; i < vec->nb_elem; i++) {
+ lpm_process_event_pkt(lconf, mbufs[i]);
+ event_vector_attr_validate(vec, mbufs[i]);
+ }
+}
+
+/* The same eventdev loop handles both single and burst dequeue of vectors */
+static __rte_always_inline void
+lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id =
+ evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint16_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event events[MAX_PKT_BURST];
+ struct lcore_conf *lconf;
+ unsigned int lcore_id;
+ int i, nb_enq, nb_deq;
+
+ if (event_p_id < 0)
+ return;
+
+ lcore_id = rte_lcore_id();
+ lconf = &lcore_conf[lcore_id];
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
+
+ while (!force_quit) {
+ /* Read events from RX queues */
+ nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
+ deq_len, 0);
+ if (nb_deq == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_deq; i++) {
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ events[i].queue_id = tx_q_id;
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ }
+
+ lpm_process_event_vector(events[i].vec, lconf);
+
+ if (flags & L3FWD_EVENT_TX_DIRECT)
+ event_vector_txq_set(events[i].vec, 0);
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
+ events, nb_deq);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_enqueue_burst(
+ event_d_id, event_p_id, events + nb_enq,
+ nb_deq - nb_enq);
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ nb_enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, events, nb_deq, 0);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, events + nb_enq,
+ nb_deq - nb_enq, 0);
+ }
+ }
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_d_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
+ return 0;
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
+ return 0;
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_q_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
+ return 0;
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
+ return 0;
+}
+
void
setup_lpm(const int socketid)
{
static uint32_t max_pkt_len;
static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
+static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
static uint8_t lkp_per_socket[NB_SOCKETS];
struct l3fwd_lkp_mode {
" [--per-port-pool]"
" [--mode]"
" [--eventq-sched]"
+ " [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
" [-E]"
" [-L]\n\n"
" --event-eth-rxqs: Number of ethernet RX queues per device.\n"
" Default: 1\n"
" Valid only if --mode=eventdev\n"
+ " --event-vector: Enable event vectorization.\n"
+ " --event-vector-size: Max vector size if event vectorization is enabled.\n"
+	       " --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled.\n"
" -E : Enable exact match, legacy flag please use --lookup=em instead\n"
" -L : Enable longest prefix match, legacy flag please use --lookup=lpm instead\n\n",
prgname);
#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
#define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
#define CMD_LINE_OPT_LOOKUP "lookup"
+#define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
+#define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
+#define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
+
enum {
/* long options mapped to a short option */
CMD_LINE_OPT_EVENTQ_SYNC_NUM,
CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
CMD_LINE_OPT_LOOKUP_NUM,
+ CMD_LINE_OPT_ENABLE_VECTOR_NUM,
+ CMD_LINE_OPT_VECTOR_SIZE_NUM,
+ CMD_LINE_OPT_VECTOR_TMO_NS_NUM
};
static const struct option lgopts[] = {
{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
{CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
+ {CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
+ {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
+ {CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
{NULL, 0, 0, 0}
};
return -1;
break;
+ case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
+ printf("event vectorization is enabled\n");
+ evt_rsrc->vector_enabled = 1;
+ break;
+ case CMD_LINE_OPT_VECTOR_SIZE_NUM:
+ evt_rsrc->vector_size = strtol(optarg, NULL, 10);
+ break;
+ case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
+ evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10);
+ break;
default:
print_usage(prgname);
return -1;
return -1;
}
+ if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) {
+ evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT;
+ fprintf(stderr, "vector size set to default (%" PRIu16 ")\n",
+ evt_rsrc->vector_size);
+ }
+
+ if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) {
+ evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT;
+ fprintf(stderr,
+ "vector timeout set to default (%" PRIu64 " ns)\n",
+ evt_rsrc->vector_tmo_ns);
+ }
+
/*
* Nothing is selected, pick longest-prefix match
* as default match.
int
init_mem(uint16_t portid, unsigned int nb_mbuf)
{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
struct lcore_conf *qconf;
int socketid;
unsigned lcore_id;
lkp_per_socket[socketid] = 1;
}
}
+
+ if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
+ unsigned int nb_vec;
+
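+		/* One event vector per vector_size mbufs, rounded up. */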
+ nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
+ evt_rsrc->vector_size;
+ snprintf(s, sizeof(s), "vector_pool_%d", portid);
+ vector_pool[portid] = rte_event_vector_pool_create(
+ s, nb_vec, 0, evt_rsrc->vector_size, socketid);
+ if (vector_pool[portid] == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Failed to create vector pool for port %d\n",
+ portid);
+ else
+ printf("Allocated vector pool for port %d\n",
+ portid);
+ }
+
qconf = &lcore_conf[lcore_id];
qconf->ipv4_lookup_struct =
l3fwd_lkp.get_ipv4_lookup_struct(socketid);
evt_rsrc->per_port_pool = per_port_pool;
evt_rsrc->pkt_pool = pktmbuf_pool;
+ evt_rsrc->vec_pool = vector_pool;
evt_rsrc->port_mask = enabled_port_mask;
/* Configure eventdev parameters if user has requested */
if (evt_rsrc->enabled) {