/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright(c) 2017-2018 Linaro Limited.
 */

#ifndef __L3FWD_EM_HLM_H__
#define __L3FWD_EM_HLM_H__

#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
#include "l3fwd_em_hlm_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_neon.h"
#include "l3fwd_em_hlm_neon.h"
#endif
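
/*
 * Number of packets resolved per rte_hash_lookup_bulk() call; the wider
 * batch on ARM64 is presumably chosen to better hide hash-table memory
 * latency.
 */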
#ifdef RTE_ARCH_ARM64
#define EM_HASH_LOOKUP_COUNT 16
#else
#define EM_HASH_LOOKUP_COUNT 8
#endif

static __rte_always_inline void
em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
		uint16_t portid, uint16_t dst_port[])
{
	int i;
	int32_t ret[EM_HASH_LOOKUP_COUNT];
	union ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT];
	const void *key_array[EM_HASH_LOOKUP_COUNT];

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		get_ipv4_5tuple(m[i], mask0.x, &key[i]);
		key_array[i] = &key[i];
	}
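
	/* Resolve all EM_HASH_LOOKUP_COUNT keys with a single bulk lookup. */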
	rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0],
			EM_HASH_LOOKUP_COUNT, ret);

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		dst_port[i] = ((ret[i] < 0) ?
				portid : ipv4_l3fwd_out_if[ret[i]]);

		if (dst_port[i] >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port[i]) == 0)
			dst_port[i] = portid;
	}
}

static __rte_always_inline void
em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
		uint16_t portid, uint16_t dst_port[])
{
	int i;
	int32_t ret[EM_HASH_LOOKUP_COUNT];
	union ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT];
	const void *key_array[EM_HASH_LOOKUP_COUNT];

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		get_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]);
		key_array[i] = &key[i];
	}

	rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0],
			EM_HASH_LOOKUP_COUNT, ret);

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		dst_port[i] = ((ret[i] < 0) ?
				portid : ipv6_l3fwd_out_if[ret[i]]);

		if (dst_port[i] >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port[i]) == 0)
			dst_port[i] = portid;
	}
}
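
/*
 * The *_events variants below mirror the two helpers above; the only
 * difference is that a lookup miss (or a disabled/out-of-range port)
 * falls back to each mbuf's own RX port (m[i]->port) instead of a
 * caller-supplied portid.
 */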

static __rte_always_inline void
em_get_dst_port_ipv4xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[],
		uint16_t dst_port[])
{
	int i;
	int32_t ret[EM_HASH_LOOKUP_COUNT];
	union ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT];
	const void *key_array[EM_HASH_LOOKUP_COUNT];

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		get_ipv4_5tuple(m[i], mask0.x, &key[i]);
		key_array[i] = &key[i];
	}

	rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0],
			EM_HASH_LOOKUP_COUNT, ret);

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		dst_port[i] = ((ret[i] < 0) ?
				m[i]->port : ipv4_l3fwd_out_if[ret[i]]);

		if (dst_port[i] >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port[i]) == 0)
			dst_port[i] = m[i]->port;
	}
}

static __rte_always_inline void
em_get_dst_port_ipv6xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[],
		uint16_t dst_port[])
{
	int i;
	int32_t ret[EM_HASH_LOOKUP_COUNT];
	union ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT];
	const void *key_array[EM_HASH_LOOKUP_COUNT];

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		get_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]);
		key_array[i] = &key[i];
	}

	rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0],
			EM_HASH_LOOKUP_COUNT, ret);

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		dst_port[i] = ((ret[i] < 0) ?
				m[i]->port : ipv6_l3fwd_out_if[ret[i]]);

		if (dst_port[i] >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port[i]) == 0)
			dst_port[i] = m[i]->port;
	}
}
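
/*
 * Scalar fallback: resolve a single packet's destination port. Used for
 * the tail of a burst and for groups whose packets do not share a common
 * TCP/UDP-over-IPv4 or -IPv6 packet type.
 */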

static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t portid)
{
	uint16_t next_hop;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint32_t tcp_or_udp;
	uint32_t l3_ptypes;

	tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {

		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
				qconf->ipv4_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {

		/* Handle IPv6 headers. */
		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
				sizeof(struct rte_ether_hdr));

		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
				qconf->ipv6_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;
	}

	/* Non-TCP/UDP or non-IP traffic is returned to its input port. */
	return portid;
}

/*
 * Buffer optimized handling of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint16_t portid, struct lcore_conf *qconf)
{
	int32_t i, j, pos;
	uint16_t dst_port[MAX_PKT_BURST];

	/*
	 * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
	 * in groups of EM_HASH_LOOKUP_COUNT.
	 */
	int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
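
	/* Prefetch the headers of the first group of packets. */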
	for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
				struct rte_ether_hdr *) + 1);
	}

	for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {

		uint32_t pkt_type = RTE_PTYPE_L3_MASK |
				RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
		uint32_t l3_type, tcp_or_udp;
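
		/*
		 * AND together the packet_type of every packet in the group;
		 * only type bits common to all survive, so one branch below
		 * can choose the batched IPv4 or IPv6 lookup path.
		 */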
		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
			pkt_type &= pkts_burst[j + i]->packet_type;

		l3_type = pkt_type & RTE_PTYPE_L3_MASK;
		tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
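
		/* Prefetch the next group's headers while this one is processed. */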
		for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;
				i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) {
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[pos],
					struct rte_ether_hdr *) + 1);
		}

		if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {

			em_get_dst_port_ipv4xN(qconf, &pkts_burst[j], portid,
					&dst_port[j]);

		} else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {

			em_get_dst_port_ipv6xN(qconf, &pkts_burst[j], portid,
					&dst_port[j]);

		} else {
			for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
				dst_port[j + i] = em_get_dst_port(qconf,
						pkts_burst[j + i], portid);
		}
	}

	/* Handle the remainder packets one at a time. */
	for (; j < nb_rx; j++)
		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
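
/*
 * send_packets_multi() (from l3fwd_sse.h/l3fwd_neon.h) coalesces runs of
 * packets bound for the same destination port into single TX bursts.
 */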

/*
 * Buffer optimized handling of events, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
		struct lcore_conf *qconf)
{
	int32_t i, j, pos;
	uint16_t dst_port[MAX_PKT_BURST];
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	/*
	 * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
	 * in groups of EM_HASH_LOOKUP_COUNT.
	 */
	int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
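
	/* Extract the mbuf from each event and prefetch its header. */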
	for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
		pkts_burst[j] = ev[j]->mbuf;
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
				struct rte_ether_hdr *) + 1);
	}

	for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {

		uint32_t pkt_type = RTE_PTYPE_L3_MASK |
				RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
		uint32_t l3_type, tcp_or_udp;

		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
			pkt_type &= pkts_burst[j + i]->packet_type;

		l3_type = pkt_type & RTE_PTYPE_L3_MASK;
		tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);

		for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;
				i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) {
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[pos],
					struct rte_ether_hdr *) + 1);
		}

		if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {

			em_get_dst_port_ipv4xN_events(qconf, &pkts_burst[j],
					&dst_port[j]);

		} else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {

			em_get_dst_port_ipv6xN_events(qconf, &pkts_burst[j],
					&dst_port[j]);

		} else {
			/* Mixed packet types: resolve and process one by one. */
			for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
				pkts_burst[j + i]->port = em_get_dst_port(qconf,
						pkts_burst[j + i],
						pkts_burst[j + i]->port);
				process_packet(pkts_burst[j + i],
						&pkts_burst[j + i]->port);
			}
			continue;
		}
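
		/*
		 * processx4_step3() (from l3fwd_sse.h/l3fwd_neon.h) rewrites
		 * the Ethernet MAC addresses for the resolved ports.
		 */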
		processx4_step3(&pkts_burst[j], &dst_port[j]);

		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
			pkts_burst[j + i]->port = dst_port[j + i];
	}

	/* Handle the remainder events one at a time. */
	for (; j < nb_rx; j++) {
		pkts_burst[j]->port = em_get_dst_port(qconf, pkts_burst[j],
				pkts_burst[j]->port);
		process_packet(pkts_burst[j], &pkts_burst[j]->port);
	}
}
#endif /* __L3FWD_EM_HLM_H__ */