diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 0adf8f4bfb..2a8ab6aab5 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include
@@ -42,12 +13,11 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
-#include
 
 #include
 #include
 #include
@@ -56,14 +26,19 @@
 #include
 
 #include "l3fwd.h"
+#include "l3fwd_event.h"
 
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#if defined(RTE_ARCH_X86) || defined(__ARM_FEATURE_CRC32)
+#define EM_HASH_CRC 1
+#endif
+
+#ifdef EM_HASH_CRC
 #include <rte_hash_crc.h>
 #define DEFAULT_HASH_FUNC rte_hash_crc
 #else
 #include <rte_jhash.h>
 #define DEFAULT_HASH_FUNC rte_jhash
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#endif
 
 #define IPV6_ADDR_LEN 16
 
@@ -73,7 +48,7 @@ struct ipv4_5tuple {
 	uint16_t port_dst;
 	uint16_t port_src;
 	uint8_t proto;
-} __attribute__((__packed__));
+} __rte_packed;
 
 union ipv4_5tuple_host {
 	struct {
@@ -96,7 +71,7 @@ struct ipv6_5tuple {
 	uint16_t port_dst;
 	uint16_t port_src;
 	uint8_t proto;
-} __attribute__((__packed__));
+} __rte_packed;
 
 union ipv6_5tuple_host {
 	struct {
@@ -125,10 +100,10 @@ struct ipv6_l3fwd_em_route {
 };
 
 static struct ipv4_l3fwd_em_route ipv4_l3fwd_em_route_array[] = {
-	{{IPv4(101, 0, 0, 0), IPv4(100, 10, 0, 1), 101, 11, IPPROTO_TCP}, 0},
-	{{IPv4(201, 0, 0, 0), IPv4(200, 20, 0, 1), 102, 12, IPPROTO_TCP}, 1},
-	{{IPv4(111, 0, 0, 0), IPv4(100, 30, 0, 1), 101, 11, IPPROTO_TCP}, 2},
-	{{IPv4(211, 0, 0, 0), IPv4(200, 40, 0, 1), 102, 12, IPPROTO_TCP}, 3},
+	{{RTE_IPV4(101, 0, 0, 0), RTE_IPV4(100, 10, 0, 1), 101, 11, IPPROTO_TCP}, 0},
+	{{RTE_IPV4(201, 0, 0, 0), RTE_IPV4(200, 20, 0, 1), 102, 12, IPPROTO_TCP}, 1},
+	{{RTE_IPV4(111, 0, 0, 0), RTE_IPV4(100, 30, 0, 1), 101, 11, IPPROTO_TCP}, 2},
+	{{RTE_IPV4(211, 0, 0, 0), RTE_IPV4(200, 40, 0, 1), 102, 12, IPPROTO_TCP}, 3},
 };
 
 static struct ipv6_l3fwd_em_route ipv6_l3fwd_em_route_array[] = {
@@ -168,17 +143,17 @@ ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
 	t = k->proto;
 	p = (const uint32_t *)&k->port_src;
 
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef EM_HASH_CRC
 	init_val = rte_hash_crc_4byte(t, init_val);
 	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
 	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
 	init_val = rte_hash_crc_4byte(*p, init_val);
-#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#else
 	init_val = rte_jhash_1word(t, init_val);
 	init_val = rte_jhash_1word(k->ip_src, init_val);
 	init_val = rte_jhash_1word(k->ip_dst, init_val);
 	init_val = rte_jhash_1word(*p, init_val);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#endif
 
 	return init_val;
 }
@@ -190,16 +165,16 @@ ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
 	const union ipv6_5tuple_host *k;
 	uint32_t t;
 	const uint32_t *p;
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef EM_HASH_CRC
 	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
 	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#endif
 
 	k = data;
 	t = k->proto;
 	p = (const uint32_t *)&k->port_src;
 
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef EM_HASH_CRC
 	ip_src0 = (const uint32_t *) k->ip_src;
 	ip_src1 = (const uint32_t *)(k->ip_src+4);
 	ip_src2 = (const uint32_t *)(k->ip_src+8);
@@ -218,22 +193,20 @@ ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
 	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
 	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
 	init_val = rte_hash_crc_4byte(*p, init_val);
-#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#else
 	init_val = rte_jhash_1word(t, init_val);
 	init_val = rte_jhash(k->ip_src,
 			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
 	init_val = rte_jhash(k->ip_dst,
 			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
 	init_val =
rte_jhash_1word(*p, init_val); -#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ +#endif return init_val; } -#define IPV4_L3FWD_EM_NUM_ROUTES \ - (sizeof(ipv4_l3fwd_em_route_array) / sizeof(ipv4_l3fwd_em_route_array[0])) +#define IPV4_L3FWD_EM_NUM_ROUTES RTE_DIM(ipv4_l3fwd_em_route_array) -#define IPV6_L3FWD_EM_NUM_ROUTES \ - (sizeof(ipv6_l3fwd_em_route_array) / sizeof(ipv6_l3fwd_em_route_array[0])) +#define IPV6_L3FWD_EM_NUM_ROUTES RTE_DIM(ipv6_l3fwd_em_route_array) static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned; static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned; @@ -258,17 +231,29 @@ em_mask_key(void *key, xmm_t mask) return vandq_s32(data, mask); } +#elif defined(__ALTIVEC__) +static inline xmm_t +em_mask_key(void *key, xmm_t mask) +{ + xmm_t data = vec_ld(0, (xmm_t *)(key)); + + return vec_and(data, mask); +} +#else +#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain #endif -static inline uint8_t -em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct) +/* Performing hash-based lookups. 8< */ +static inline uint16_t +em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct) { int ret = 0; union ipv4_5tuple_host key; struct rte_hash *ipv4_l3fwd_lookup_struct = (struct rte_hash *)lookup_struct; - ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live); + ipv4_hdr = (uint8_t *)ipv4_hdr + + offsetof(struct rte_ipv4_hdr, time_to_live); /* * Get 5 tuple: dst port, src port, dst IP address, @@ -278,18 +263,20 @@ em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct) /* Find destination port */ ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key); - return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]); + return (ret < 0) ? portid : ipv4_l3fwd_out_if[ret]; } +/* >8 End of performing hash-based lookups. */ -static inline uint8_t -em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) +static inline uint16_t +em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct) { int ret = 0; union ipv6_5tuple_host key; struct rte_hash *ipv6_l3fwd_lookup_struct = (struct rte_hash *)lookup_struct; - ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len); + ipv6_hdr = (uint8_t *)ipv6_hdr + + offsetof(struct rte_ipv6_hdr, payload_len); void *data0 = ipv6_hdr; void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t); void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t); @@ -301,7 +288,11 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) * Get part of 5 tuple: dst IP address lower 96 bits * and src IP address higher 32 bits. */ +#if defined RTE_ARCH_X86 + key.xmm[1] = _mm_loadu_si128(data1); +#else key.xmm[1] = *(xmm_t *)data1; +#endif /* * Get part of 5 tuple: dst port and src port @@ -311,19 +302,14 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) /* Find destination port */ ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key); - return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]); + return (ret < 0) ? portid : ipv6_l3fwd_out_if[ret]; } - -/* - * Include header file if SSE4_1 is enabled for - * buffer optimization i.e. ENABLE_MULTI_BUFFER_OPTIMIZE=1. 
- */ -#if defined(__SSE4_1__) -#ifndef HASH_MULTI_LOOKUP -#include "l3fwd_em_sse.h" +#if defined RTE_ARCH_X86 || defined __ARM_NEON +#if defined(NO_HASH_MULTI_LOOKUP) +#include "l3fwd_em_sequential.h" #else -#include "l3fwd_em_hlm_sse.h" +#include "l3fwd_em_hlm.h" #endif #else #include "l3fwd_em.h" @@ -445,19 +431,19 @@ populate_ipv4_many_flow_into_table(const struct rte_hash *h, switch (i & (NUMBER_PORT_USED - 1)) { case 0: entry = ipv4_l3fwd_em_route_array[0]; - entry.key.ip_dst = IPv4(101, c, b, a); + entry.key.ip_dst = RTE_IPV4(101, c, b, a); break; case 1: entry = ipv4_l3fwd_em_route_array[1]; - entry.key.ip_dst = IPv4(201, c, b, a); + entry.key.ip_dst = RTE_IPV4(201, c, b, a); break; case 2: entry = ipv4_l3fwd_em_route_array[2]; - entry.key.ip_dst = IPv4(111, c, b, a); + entry.key.ip_dst = RTE_IPV4(111, c, b, a); break; case 3: entry = ipv4_l3fwd_em_route_array[3]; - entry.key.ip_dst = IPv4(211, c, b, a); + entry.key.ip_dst = RTE_IPV4(211, c, b, a); break; }; convert_ipv4_5tuple(&entry.key, &newkey); @@ -524,43 +510,152 @@ populate_ipv6_many_flow_into_table(const struct rte_hash *h, printf("Hash: Adding 0x%x keys\n", nr_flow); } +/* Requirements: + * 1. IP packets without extension; + * 2. L4 payload should be either TCP or UDP. + */ +int +em_check_ptype(int portid) +{ + int i, ret; + int ptype_l3_ipv4_ext = 0; + int ptype_l3_ipv6_ext = 0; + int ptype_l4_tcp = 0; + int ptype_l4_udp = 0; + uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK; + + ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0); + if (ret <= 0) + return 0; + + uint32_t ptypes[ret]; + + ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret); + for (i = 0; i < ret; ++i) { + switch (ptypes[i]) { + case RTE_PTYPE_L3_IPV4_EXT: + ptype_l3_ipv4_ext = 1; + break; + case RTE_PTYPE_L3_IPV6_EXT: + ptype_l3_ipv6_ext = 1; + break; + case RTE_PTYPE_L4_TCP: + ptype_l4_tcp = 1; + break; + case RTE_PTYPE_L4_UDP: + ptype_l4_udp = 1; + break; + } + } + + if (ptype_l3_ipv4_ext == 0) + printf("port %d cannot parse RTE_PTYPE_L3_IPV4_EXT\n", portid); + if (ptype_l3_ipv6_ext == 0) + printf("port %d cannot parse RTE_PTYPE_L3_IPV6_EXT\n", portid); + if (!ptype_l3_ipv4_ext || !ptype_l3_ipv6_ext) + return 0; + + if (ptype_l4_tcp == 0) + printf("port %d cannot parse RTE_PTYPE_L4_TCP\n", portid); + if (ptype_l4_udp == 0) + printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid); + if (ptype_l4_tcp && ptype_l4_udp) + return 1; + + return 0; +} + +static inline void +em_parse_ptype(struct rte_mbuf *m) +{ + struct rte_ether_hdr *eth_hdr; + uint32_t packet_type = RTE_PTYPE_UNKNOWN; + uint16_t ether_type; + void *l3; + int hdr_len; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + + eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + ether_type = eth_hdr->ether_type; + l3 = (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr); + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + ipv4_hdr = (struct rte_ipv4_hdr *)l3; + hdr_len = rte_ipv4_hdr_len(ipv4_hdr); + if (hdr_len == sizeof(struct rte_ipv4_hdr)) { + packet_type |= RTE_PTYPE_L3_IPV4; + if (ipv4_hdr->next_proto_id == IPPROTO_TCP) + packet_type |= RTE_PTYPE_L4_TCP; + else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) + packet_type |= RTE_PTYPE_L4_UDP; + } else + packet_type |= RTE_PTYPE_L3_IPV4_EXT; + } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + ipv6_hdr = (struct rte_ipv6_hdr *)l3; + if (ipv6_hdr->proto == IPPROTO_TCP) + packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; + else if (ipv6_hdr->proto == 
IPPROTO_UDP) + packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; + else + packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + } + + m->packet_type = packet_type; +} + +uint16_t +em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused, + struct rte_mbuf *pkts[], uint16_t nb_pkts, + uint16_t max_pkts __rte_unused, + void *user_param __rte_unused) +{ + unsigned i; + + for (i = 0; i < nb_pkts; ++i) + em_parse_ptype(pkts[i]); + + return nb_pkts; +} + /* main processing loop */ int -em_main_loop(__attribute__((unused)) void *dummy) +em_main_loop(__rte_unused void *dummy) { struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; unsigned lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; int i, nb_rx; - uint8_t portid, queueid; + uint8_t queueid; + uint16_t portid; struct lcore_conf *qconf; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; - prev_tsc = 0; - lcore_id = rte_lcore_id(); qconf = &lcore_conf[lcore_id]; - if (qconf->n_rx_queue == 0) { + const uint16_t n_rx_q = qconf->n_rx_queue; + const uint16_t n_tx_p = qconf->n_tx_port; + if (n_rx_q == 0) { RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id); return 0; } RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id); - for (i = 0; i < qconf->n_rx_queue; i++) { + for (i = 0; i < n_rx_q; i++) { portid = qconf->rx_queue_list[i].port_id; queueid = qconf->rx_queue_list[i].queue_id; RTE_LOG(INFO, L3FWD, - " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", lcore_id, portid, queueid); } - while (!force_quit) { + cur_tsc = rte_rdtsc(); + prev_tsc = cur_tsc; - cur_tsc = rte_rdtsc(); + while (!force_quit) { /* * TX burst queue drain @@ -568,8 +663,8 @@ em_main_loop(__attribute__((unused)) void *dummy) diff_tsc = cur_tsc - prev_tsc; if (unlikely(diff_tsc > drain_tsc)) { - for (i = 0; i < qconf->n_rx_queue; i++) { - portid = qconf->rx_queue_list[i].port_id; + for (i = 0; i < n_tx_p; ++i) { + portid = qconf->tx_port_id[i]; if (qconf->tx_mbufs[portid].len == 0) continue; send_burst(qconf, @@ -584,7 +679,7 @@ em_main_loop(__attribute__((unused)) void *dummy) /* * Read packet from RX queues */ - for (i = 0; i < qconf->n_rx_queue; ++i) { + for (i = 0; i < n_rx_q; ++i) { portid = qconf->rx_queue_list[i].port_id; queueid = qconf->rx_queue_list[i].queue_id; nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, @@ -592,26 +687,198 @@ em_main_loop(__attribute__((unused)) void *dummy) if (nb_rx == 0) continue; - /* - * For SSE4_1 use ENABLE_MULTI_BUFFER_OPTIMIZE=1 - * code. - */ -#if defined(__SSE4_1__) +#if defined RTE_ARCH_X86 || defined __ARM_NEON l3fwd_em_send_packets(nb_rx, pkts_burst, portid, qconf); #else l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst, portid, qconf); -#endif /* __SSE_4_1__ */ +#endif } + + cur_tsc = rte_rdtsc(); } return 0; } -/* - * Initialize exact match (hash) parameters. 
- */ +static __rte_always_inline void +em_event_loop_single(struct l3fwd_event_resources *evt_rsrc, + const uint8_t flags) +{ + const int event_p_id = l3fwd_get_free_event_port(evt_rsrc); + const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[ + evt_rsrc->evq.nb_queues - 1]; + const uint8_t event_d_id = evt_rsrc->event_d_id; + struct lcore_conf *lconf; + unsigned int lcore_id; + struct rte_event ev; + + if (event_p_id < 0) + return; + + lcore_id = rte_lcore_id(); + lconf = &lcore_conf[lcore_id]; + + RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id); + while (!force_quit) { + if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0)) + continue; + + struct rte_mbuf *mbuf = ev.mbuf; + +#if defined RTE_ARCH_X86 || defined __ARM_NEON + mbuf->port = em_get_dst_port(lconf, mbuf, mbuf->port); + process_packet(mbuf, &mbuf->port); +#else + l3fwd_em_simple_process(mbuf, lconf); +#endif + if (mbuf->port == BAD_PORT) { + rte_pktmbuf_free(mbuf); + continue; + } + + if (flags & L3FWD_EVENT_TX_ENQ) { + ev.queue_id = tx_q_id; + ev.op = RTE_EVENT_OP_FORWARD; + while (rte_event_enqueue_burst(event_d_id, event_p_id, + &ev, 1) && !force_quit) + ; + } + + if (flags & L3FWD_EVENT_TX_DIRECT) { + rte_event_eth_tx_adapter_txq_set(mbuf, 0); + while (!rte_event_eth_tx_adapter_enqueue(event_d_id, + event_p_id, &ev, 1, 0) && + !force_quit) + ; + } + } +} + +static __rte_always_inline void +em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc, + const uint8_t flags) +{ + const int event_p_id = l3fwd_get_free_event_port(evt_rsrc); + const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[ + evt_rsrc->evq.nb_queues - 1]; + const uint8_t event_d_id = evt_rsrc->event_d_id; + const uint16_t deq_len = evt_rsrc->deq_depth; + struct rte_event events[MAX_PKT_BURST]; + struct lcore_conf *lconf; + unsigned int lcore_id; + int i, nb_enq, nb_deq; + + if (event_p_id < 0) + return; + + lcore_id = rte_lcore_id(); + + lconf = &lcore_conf[lcore_id]; + + RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id); + + while (!force_quit) { + /* Read events from RX queues */ + nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, + events, deq_len, 0); + if (nb_deq == 0) { + rte_pause(); + continue; + } + +#if defined RTE_ARCH_X86 || defined __ARM_NEON + l3fwd_em_process_events(nb_deq, (struct rte_event **)&events, + lconf); +#else + l3fwd_em_no_opt_process_events(nb_deq, + (struct rte_event **)&events, + lconf); +#endif + for (i = 0; i < nb_deq; i++) { + if (flags & L3FWD_EVENT_TX_ENQ) { + events[i].queue_id = tx_q_id; + events[i].op = RTE_EVENT_OP_FORWARD; + } + + if (flags & L3FWD_EVENT_TX_DIRECT) + rte_event_eth_tx_adapter_txq_set(events[i].mbuf, + 0); + } + + if (flags & L3FWD_EVENT_TX_ENQ) { + nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id, + events, nb_deq); + while (nb_enq < nb_deq && !force_quit) + nb_enq += rte_event_enqueue_burst(event_d_id, + event_p_id, events + nb_enq, + nb_deq - nb_enq); + } + + if (flags & L3FWD_EVENT_TX_DIRECT) { + nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id, + event_p_id, events, nb_deq, 0); + while (nb_enq < nb_deq && !force_quit) + nb_enq += rte_event_eth_tx_adapter_enqueue( + event_d_id, event_p_id, + events + nb_enq, + nb_deq - nb_enq, 0); + } + } +} + +static __rte_always_inline void +em_event_loop(struct l3fwd_event_resources *evt_rsrc, + const uint8_t flags) +{ + if (flags & L3FWD_EVENT_SINGLE) + em_event_loop_single(evt_rsrc, flags); + if (flags & L3FWD_EVENT_BURST) + em_event_loop_burst(evt_rsrc, flags); +} + +int __rte_noinline 
+em_event_main_loop_tx_d(__rte_unused void *dummy) +{ + struct l3fwd_event_resources *evt_rsrc = + l3fwd_get_eventdev_rsrc(); + + em_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE); + return 0; +} + +int __rte_noinline +em_event_main_loop_tx_d_burst(__rte_unused void *dummy) +{ + struct l3fwd_event_resources *evt_rsrc = + l3fwd_get_eventdev_rsrc(); + + em_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST); + return 0; +} + +int __rte_noinline +em_event_main_loop_tx_q(__rte_unused void *dummy) +{ + struct l3fwd_event_resources *evt_rsrc = + l3fwd_get_eventdev_rsrc(); + + em_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE); + return 0; +} + +int __rte_noinline +em_event_main_loop_tx_q_burst(__rte_unused void *dummy) +{ + struct l3fwd_event_resources *evt_rsrc = + l3fwd_get_eventdev_rsrc(); + + em_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST); + return 0; +} + +/* Initialize exact match (hash) parameters. 8< */ void setup_hash(const int socketid) { @@ -686,6 +953,7 @@ setup_hash(const int socketid) } } } +/* >8 End of initialization of hash parameters. */ /* Return ipv4/ipv6 em fwd lookup struct. */ void *
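
The em_mask_key() variants above all serve one purpose: the lookup key is filled by a single 16-byte vector load that starts at the IPv4 header's time_to_live field, so it also picks up the TTL and header-checksum bytes, which are not part of the 5-tuple and must be zeroed before hashing or lookup. A scalar sketch of what the SSE/NEON/Altivec intrinsics implement (mask_key_scalar is illustrative and not in the patch):

	/* Scalar equivalent of em_mask_key() (illustrative only): AND the
	 * 16 loaded key bytes with a mask that keeps just the 5-tuple. */
	static inline void
	mask_key_scalar(void *key, const uint8_t mask[16])
	{
		uint8_t *k = key;
		int i;

		for (i = 0; i < 16; i++)
			k[i] &= mask[i];
	}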
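
setup_hash() and em_get_ipv4_dst_port() are two halves of one contract: rte_hash_add_key() returns a slot index that the setup code uses to record the egress interface, and rte_hash_lookup() returns the same index on the fast path. A condensed sketch of that pattern against this file's types (the demo_* names and the 16-entry table are illustrative; the rte_hash calls are the real API):

	#include <string.h>
	#include <rte_hash.h>

	static struct rte_hash *demo_h;

	/* Illustrative control-path setup: create the table with the custom
	 * 5-tuple hash and remember the egress port per inserted key. */
	static int
	demo_setup(int socketid)
	{
		struct rte_hash_parameters params = {
			.name = "demo_em_table",	/* illustrative name */
			.entries = 16,			/* illustrative size */
			.key_len = sizeof(union ipv4_5tuple_host),
			.hash_func = ipv4_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = socketid,
		};
		union ipv4_5tuple_host key;
		int32_t pos;

		demo_h = rte_hash_create(&params);
		if (demo_h == NULL)
			return -1;

		memset(&key, 0, sizeof(key));	/* convert_ipv4_5tuple() fills this */
		pos = rte_hash_add_key(demo_h, &key);
		if (pos < 0)
			return -1;
		ipv4_l3fwd_out_if[pos] = 1;	/* egress port for this flow */
		return 0;
	}

	/* Illustrative data-path lookup: a miss falls back to the RX port,
	 * exactly as em_get_ipv4_dst_port() does. */
	static uint16_t
	demo_lookup(const union ipv4_5tuple_host *key, uint16_t rx_port)
	{
		int32_t pos = rte_hash_lookup(demo_h, key);

		return (pos < 0) ? rx_port : ipv4_l3fwd_out_if[pos];
	}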
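
em_check_ptype() and em_cb_parse_ptype() only take effect once wired into the RX path: when the device cannot classify L3/L4 types itself, the callback recomputes mbuf->packet_type in software for every received burst. A sketch of that wiring (setup_ptype_parsing() is hypothetical; rte_eth_add_rx_callback() is the real ethdev call, and in l3fwd this hookup lives in main.c):

	#include <rte_ethdev.h>

	static int
	setup_ptype_parsing(uint16_t portid, uint16_t queueid)
	{
		/* Hardware already reports the needed L3/L4 ptypes. */
		if (em_check_ptype(portid))
			return 0;

		/* Fall back to parsing headers in software on each RX burst. */
		if (rte_eth_add_rx_callback(portid, queueid,
					    em_cb_parse_ptype, NULL) == NULL)
			return -1;

		return 0;
	}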
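
One detail of em_main_loop() worth making explicit: drain_tsc converts BURST_TX_DRAIN_US (100 us in l3fwd.h) into TSC cycles, rounding the cycles-per-microsecond figure up so the drain never fires early. For example, assuming a 2.5 GHz TSC, (2500000000 + 1000000 - 1) / 1000000 = 2500 cycles per microsecond, so drain_tsc = 2500 * 100 = 250000 cycles; any buffered TX packets older than that are flushed even when the queue never fills a complete burst.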
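
The four em_event_main_loop_tx_* wrappers exist so the event-mode setup can hand a plain lcore_function_t to the EAL launcher, one per combination of TX mode (direct adapter enqueue vs. forward to a TX queue) and dequeue style (single event vs. burst). A hedged sketch of the dispatch (launch_em_event_workers() is hypothetical; the tx_mode_q and has_burst flags are assumed from l3fwd_event.h, and l3fwd itself selects the loop through an ops table in l3fwd_event.c):

	#include <rte_launch.h>
	#include <rte_lcore.h>

	static void
	launch_em_event_workers(struct l3fwd_event_resources *evt_rsrc)
	{
		lcore_function_t *loop;
		unsigned int lcore_id;

		if (evt_rsrc->tx_mode_q)	/* assumed flag: TX via event queue */
			loop = evt_rsrc->has_burst ? em_event_main_loop_tx_q_burst :
						     em_event_main_loop_tx_q;
		else				/* TX via the ethdev TX adapter */
			loop = evt_rsrc->has_burst ? em_event_main_loop_tx_d_burst :
						     em_event_main_loop_tx_d;

		/* RTE_LCORE_FOREACH_WORKER is the current name of the macro. */
		RTE_LCORE_FOREACH_WORKER(lcore_id)
			rte_eal_remote_launch(loop, NULL, lcore_id);
	}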