/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef __L3FWD_EM_SEQUENTIAL_H__
#define __L3FWD_EM_SEQUENTIAL_H__

/**
 * @file
 * This is an optional implementation of packet classification in the
 * Exact-Match path, using a sequential packet classification method.
 * While the multi hash lookup path seems to provide better performance,
 * this sequential one is disabled by default and can be enabled with the
 * NO_HASH_LOOKUP_MULTI global define at compilation time.
 */
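
/*
 * A minimal sketch of how the define might be supplied (the exact knob
 * depends on the build system in use; the flags shown are illustrative):
 *
 *	cc ... -DNO_HASH_LOOKUP_MULTI ...
 *
 * i.e. add -DNO_HASH_LOOKUP_MULTI to the CFLAGS/c_args used to build the
 * l3fwd example.
 */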

#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_neon.h"
#endif
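
/*
 * Note (an assumption based on the wider l3fwd example): the vector
 * headers above are expected to provide send_packets_multi(), which the
 * burst path below relies on.
 */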

static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	uint16_t next_hop;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint32_t tcp_or_udp;
	uint32_t l3_ptypes;

	/* Only TCP/UDP over IPv4/IPv6 is classified; anything else is
	 * returned on its input port (see the tail of this function).
	 */
	tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {

		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
				qconf->ipv4_lookup_struct);

		/* Fall back to the input port if the next hop is out of
		 * range or not an enabled port.
		 */
		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {

		/* Handle IPv6 headers. */
		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
				sizeof(struct rte_ether_hdr));

		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
				qconf->ipv6_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;
	}

	/* Neither TCP/UDP-IPv4 nor TCP/UDP-IPv6: keep the input port. */
	return portid;
}
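
/*
 * Worked example with illustrative values: if enabled_port_mask == 0x5
 * (ports 0 and 2 enabled) and the lookup yields next_hop == 1, then
 * (0x5 & 1 << 1) == 0, so the fallback triggers and the packet leaves on
 * its input port instead.
 */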

/*
 * Buffer optimized handling of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint16_t portid, struct lcore_conf *qconf)
{
	int32_t i, j;
	uint16_t dst_port[MAX_PKT_BURST];

	/* Prefetch the first packet's header before entering the loop. */
	if (nb_rx > 0)
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0],
				struct rte_ether_hdr *) + 1);

	for (i = 1, j = 0; j < nb_rx; i++, j++) {
		/* Prefetch packet i one step ahead of the lookup on
		 * packet j (i == j + 1).
		 */
		if (i < nb_rx)
			rte_prefetch0(rte_pktmbuf_mtod(
					pkts_burst[i],
					struct rte_ether_hdr *) + 1);

		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
	}

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
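
/*
 * A minimal sketch of the expected poll-mode caller (hypothetical variable
 * names, for illustration only):
 *
 *	nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
 *			MAX_PKT_BURST);
 *	if (nb_rx != 0)
 *		l3fwd_em_send_packets(nb_rx, pkts_burst, portid, qconf);
 */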

/*
 * Buffer optimized handling of events, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_process_events(int nb_rx, struct rte_event **events,
		struct lcore_conf *qconf)
{
	int32_t i, j;

	rte_prefetch0(rte_pktmbuf_mtod(events[0]->mbuf,
			struct rte_ether_hdr *) + 1);

	for (i = 1, j = 0; j < nb_rx; i++, j++) {
		struct rte_mbuf *mbuf = events[j]->mbuf;

		if (i < nb_rx)
			rte_prefetch0(rte_pktmbuf_mtod(
					events[i]->mbuf,
					struct rte_ether_hdr *) + 1);

		/* Resolve the destination port, then let process_packet()
		 * rewrite the headers for that port.
		 */
		mbuf->port = em_get_dst_port(qconf, mbuf, mbuf->port);
		process_packet(mbuf, &mbuf->port);
	}
}
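
/*
 * A minimal sketch of an event-mode caller (hypothetical names, for
 * illustration only; real workers may pass events differently):
 *
 *	struct rte_event ev[MAX_PKT_BURST], *evp[MAX_PKT_BURST];
 *	uint16_t nb, k;
 *
 *	nb = rte_event_dequeue_burst(event_dev_id, event_port_id, ev,
 *			MAX_PKT_BURST, 0);
 *	for (k = 0; k < nb; k++)
 *		evp[k] = &ev[k];
 *	if (nb != 0)
 *		l3fwd_em_process_events(nb, evp, qconf);
 */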

#endif /* __L3FWD_EM_SEQUENTIAL_H__ */