/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_hash.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
#include "em_route_parse.c"
#if defined(RTE_ARCH_X86) || defined(__ARM_FEATURE_CRC32)
#define EM_HASH_CRC 1
#endif

#ifdef EM_HASH_CRC
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

#define IPV6_ADDR_LEN 16
union ipv4_5tuple_host {

#define XMM_NUM_IN_IPV6_5TUPLE 3

union ipv6_5tuple_host {
        uint8_t ip_src[IPV6_ADDR_LEN];
        uint8_t ip_dst[IPV6_ADDR_LEN];
        xmm_t xmm[XMM_NUM_IN_IPV6_5TUPLE];
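/*
 * The exact-match keys are sized to whole vector registers so they can be
 * masked and compared with single SIMD operations: the IPv4 5-tuple fits in
 * one 16-byte xmm_t, while the IPv6 5-tuple spans XMM_NUM_IN_IPV6_5TUPLE
 * (i.e. three) xmm_t lanes, 48 bytes in total including padding.
 */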
/* 198.18.0.0/16 is set aside for RFC2544 benchmarking (RFC5735).
 * Use RFC863 Discard Protocol.
 */
const struct ipv4_l3fwd_em_route ipv4_l3fwd_em_route_array[] = {
        {{RTE_IPV4(198, 18, 0, 0), RTE_IPV4(198, 18, 0, 1), 9, 9, IPPROTO_UDP}, 0},
        {{RTE_IPV4(198, 18, 1, 0), RTE_IPV4(198, 18, 1, 1), 9, 9, IPPROTO_UDP}, 1},
        {{RTE_IPV4(198, 18, 2, 0), RTE_IPV4(198, 18, 2, 1), 9, 9, IPPROTO_UDP}, 2},
        {{RTE_IPV4(198, 18, 3, 0), RTE_IPV4(198, 18, 3, 1), 9, 9, IPPROTO_UDP}, 3},
        {{RTE_IPV4(198, 18, 4, 0), RTE_IPV4(198, 18, 4, 1), 9, 9, IPPROTO_UDP}, 4},
        {{RTE_IPV4(198, 18, 5, 0), RTE_IPV4(198, 18, 5, 1), 9, 9, IPPROTO_UDP}, 5},
        {{RTE_IPV4(198, 18, 6, 0), RTE_IPV4(198, 18, 6, 1), 9, 9, IPPROTO_UDP}, 6},
        {{RTE_IPV4(198, 18, 7, 0), RTE_IPV4(198, 18, 7, 1), 9, 9, IPPROTO_UDP}, 7},
        {{RTE_IPV4(198, 18, 8, 0), RTE_IPV4(198, 18, 8, 1), 9, 9, IPPROTO_UDP}, 8},
        {{RTE_IPV4(198, 18, 9, 0), RTE_IPV4(198, 18, 9, 1), 9, 9, IPPROTO_UDP}, 9},
        {{RTE_IPV4(198, 18, 10, 0), RTE_IPV4(198, 18, 10, 1), 9, 9, IPPROTO_UDP}, 10},
        {{RTE_IPV4(198, 18, 11, 0), RTE_IPV4(198, 18, 11, 1), 9, 9, IPPROTO_UDP}, 11},
        {{RTE_IPV4(198, 18, 12, 0), RTE_IPV4(198, 18, 12, 1), 9, 9, IPPROTO_UDP}, 12},
        {{RTE_IPV4(198, 18, 13, 0), RTE_IPV4(198, 18, 13, 1), 9, 9, IPPROTO_UDP}, 13},
        {{RTE_IPV4(198, 18, 14, 0), RTE_IPV4(198, 18, 14, 1), 9, 9, IPPROTO_UDP}, 14},
        {{RTE_IPV4(198, 18, 15, 0), RTE_IPV4(198, 18, 15, 1), 9, 9, IPPROTO_UDP}, 15},
};
/* 2001:0200::/48 is the IANA-reserved range for IPv6 benchmarking (RFC5180).
 * Use RFC863 Discard Protocol.
 */
const struct ipv6_l3fwd_em_route ipv6_l3fwd_em_route_array[] = {
        {{{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 0},
        {{{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 1},
        {{{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 2},
        {{{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 3},
        {{{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 4},
        {{{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 5},
        {{{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 6},
        {{{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 7},
        {{{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 8},
        {{{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 9},
        {{{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 10},
        {{{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 11},
        {{{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 12},
        {{{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 13},
        {{{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 14},
        {{{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0},
          {32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1}, 9, 9, IPPROTO_UDP}, 15},
};
struct rte_hash *ipv4_l3fwd_em_lookup_struct[NB_SOCKETS];
struct rte_hash *ipv6_l3fwd_em_lookup_struct[NB_SOCKETS];
static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
                uint32_t init_val)
{
        const union ipv4_5tuple_host *k;
        uint32_t t;
        const uint32_t *p;

        k = data;
        t = k->proto;
        p = (const uint32_t *)&k->port_src;

#ifdef EM_HASH_CRC
        init_val = rte_hash_crc_4byte(t, init_val);
        init_val = rte_hash_crc_4byte(k->ip_src, init_val);
        init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
        init_val = rte_hash_crc_4byte(*p, init_val);
#else
        init_val = rte_jhash_1word(t, init_val);
        init_val = rte_jhash_1word(k->ip_src, init_val);
        init_val = rte_jhash_1word(k->ip_dst, init_val);
        init_val = rte_jhash_1word(*p, init_val);
#endif

        return init_val;
}
static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
                uint32_t init_val)
{
        const union ipv6_5tuple_host *k;
        uint32_t t;
        const uint32_t *p;
#ifdef EM_HASH_CRC
        const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
        const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif

        k = data;
        t = k->proto;
        p = (const uint32_t *)&k->port_src;

#ifdef EM_HASH_CRC
        ip_src0 = (const uint32_t *) k->ip_src;
        ip_src1 = (const uint32_t *)(k->ip_src+4);
        ip_src2 = (const uint32_t *)(k->ip_src+8);
        ip_src3 = (const uint32_t *)(k->ip_src+12);
        ip_dst0 = (const uint32_t *) k->ip_dst;
        ip_dst1 = (const uint32_t *)(k->ip_dst+4);
        ip_dst2 = (const uint32_t *)(k->ip_dst+8);
        ip_dst3 = (const uint32_t *)(k->ip_dst+12);
        init_val = rte_hash_crc_4byte(t, init_val);
        init_val = rte_hash_crc_4byte(*ip_src0, init_val);
        init_val = rte_hash_crc_4byte(*ip_src1, init_val);
        init_val = rte_hash_crc_4byte(*ip_src2, init_val);
        init_val = rte_hash_crc_4byte(*ip_src3, init_val);
        init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
        init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
        init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
        init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
        init_val = rte_hash_crc_4byte(*p, init_val);
#else
        init_val = rte_jhash_1word(t, init_val);
        init_val = rte_jhash(k->ip_src,
                        sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
        init_val = rte_jhash(k->ip_dst,
                        sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
        init_val = rte_jhash_1word(*p, init_val);
#endif
        return init_val;
}
static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

static rte_xmm_t mask0;
static rte_xmm_t mask1;
static rte_xmm_t mask2;
#if defined(__SSE2__)
static __rte_always_inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
        __m128i data = _mm_loadu_si128((__m128i *)(key));

        return _mm_and_si128(data, mask);
}
#elif defined(__ARM_NEON)
static __rte_always_inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
        int32x4_t data = vld1q_s32((int32_t *)key);

        return vandq_s32(data, mask);
}
#elif defined(__ALTIVEC__)
static __rte_always_inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
        xmm_t data = vec_ld(0, (xmm_t *)(key));

        return vec_and(data, mask);
}
#else
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
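/*
 * em_mask_key() zeroes the "don't care" bytes that fall inside the key
 * window (e.g. TTL and checksum for IPv4, payload length and hop limit for
 * IPv6), so a key built straight from a packet header is byte-identical to
 * the canonical keys inserted by convert_ipv4_5tuple()/convert_ipv6_5tuple()
 * and therefore hashes and compares equal.
 */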
/* Performing hash-based lookups. 8< */
static inline uint16_t
em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
        int ret = 0;
        union ipv4_5tuple_host key;
        struct rte_hash *ipv4_l3fwd_lookup_struct =
                (struct rte_hash *)lookup_struct;

        ipv4_hdr = (uint8_t *)ipv4_hdr +
                offsetof(struct rte_ipv4_hdr, time_to_live);

        /*
         * Get 5 tuple: dst port, src port, dst IP address,
         * src IP address and protocol.
         */
        key.xmm = em_mask_key(ipv4_hdr, mask0.x);

        /* Find destination port */
        ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
        return (ret < 0) ? portid : ipv4_l3fwd_out_if[ret];
}
/* >8 End of performing hash-based lookups. */
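/*
 * Usage sketch (illustrative only; the caller, including the "qconf" lcore
 * context shown here, lives outside this file): once the per-socket table is
 * populated, the lookup is invoked per received packet, e.g.:
 *
 *      struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
 *                      struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
 *      uint16_t dst_port = em_get_ipv4_dst_port(ip, portid,
 *                      qconf->ipv4_lookup_struct);
 */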
static inline uint16_t
em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
        int ret = 0;
        union ipv6_5tuple_host key;
        struct rte_hash *ipv6_l3fwd_lookup_struct =
                (struct rte_hash *)lookup_struct;

        ipv6_hdr = (uint8_t *)ipv6_hdr +
                offsetof(struct rte_ipv6_hdr, payload_len);
        void *data0 = ipv6_hdr;
        void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
        void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);

        /* Get part of 5 tuple: src IP address lower 96 bits and protocol */
        key.xmm[0] = em_mask_key(data0, mask1.x);

        /*
         * Get part of 5 tuple: dst IP address lower 96 bits
         * and src IP address higher 32 bits.
         */
#if defined RTE_ARCH_X86
        key.xmm[1] = _mm_loadu_si128(data1);
#else
        key.xmm[1] = *(xmm_t *)data1;
#endif

        /*
         * Get part of 5 tuple: dst port and src port
         * and dst IP address higher 32 bits.
         */
        key.xmm[2] = em_mask_key(data2, mask2.x);

        /* Find destination port */
        ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
        return (ret < 0) ? portid : ipv6_l3fwd_out_if[ret];
}
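/*
 * The 48-byte IPv6 key is assembled from three unaligned 16-byte loads
 * starting at the payload_len field: mask1 keeps only the protocol byte of
 * the first word, the middle load is used as-is, and mask2 clears the bytes
 * that follow the destination port so that trailing packet data never
 * perturbs the hash or the key comparison.
 */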
#if defined RTE_ARCH_X86 || defined __ARM_NEON
#if defined(NO_HASH_MULTI_LOOKUP)
#include "l3fwd_em_sequential.h"
#else
#include "l3fwd_em_hlm.h"
#endif
#else
#include "l3fwd_em.h"
#endif
convert_ipv4_5tuple(struct ipv4_5tuple *key1,
                union ipv4_5tuple_host *key2)
        key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
        key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
        key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
        key2->port_src = rte_cpu_to_be_16(key1->port_src);
        key2->proto = key1->proto;

convert_ipv6_5tuple(struct ipv6_5tuple *key1,
                union ipv6_5tuple_host *key2)
        for (i = 0; i < 16; i++) {
                key2->ip_dst[i] = key1->ip_dst[i];
                key2->ip_src[i] = key1->ip_src[i];
        }
        key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
        key2->port_src = rte_cpu_to_be_16(key1->port_src);
        key2->proto = key1->proto;
#define BYTE_VALUE_MAX 256
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00
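/*
 * These masks choose which bytes of each loaded key word take part in the
 * lookup: on the little-endian targets supported here, BIT_8_TO_15 keeps
 * only the second byte of the first 32-bit word (the IPv4 protocol field,
 * clearing TTL and header checksum), while ALL_32_BITS keeps a word intact.
 */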
populate_ipv4_flow_into_table(const struct rte_hash *h)
        struct rte_eth_dev_info dev_info;
        char srcbuf[INET6_ADDRSTRLEN];
        char dstbuf[INET6_ADDRSTRLEN];

        mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
                                ALL_32_BITS, ALL_32_BITS} };

        for (i = 0; i < route_num_v4; i++) {
                struct em_rule *entry;
                union ipv4_5tuple_host newkey;

                if ((1 << em_route_base_v4[i].if_out &
                                enabled_port_mask) == 0)

                entry = &em_route_base_v4[i];
                convert_ipv4_5tuple(&(entry->v4_key), &newkey);
                ret = rte_hash_add_key(h, (void *) &newkey);
                        rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
                                " to the l3fwd hash.\n", i);
                ipv4_l3fwd_out_if[ret] = entry->if_out;
                ret = rte_eth_dev_info_get(em_route_base_v4[i].if_out,
                        rte_exit(EXIT_FAILURE,
                                "Error during getting device (port %u) info: %s\n",
                                em_route_base_v4[i].if_out, strerror(-ret));

                src.s_addr = htonl(em_route_base_v4[i].v4_key.ip_src);
                dst.s_addr = htonl(em_route_base_v4[i].v4_key.ip_dst);
                printf("EM: Adding route %s, %s, %d, %d, %d (%d) [%s]\n",
                        inet_ntop(AF_INET, &dst, dstbuf, sizeof(dstbuf)),
                        inet_ntop(AF_INET, &src, srcbuf, sizeof(srcbuf)),
                        em_route_base_v4[i].v4_key.port_dst,
                        em_route_base_v4[i].v4_key.port_src,
                        em_route_base_v4[i].v4_key.proto,
                        em_route_base_v4[i].if_out, dev_info.device->name);

        printf("Hash: Adding 0x%" PRIx64 " keys\n",
                (uint64_t)route_num_v4);
#define BIT_16_TO_23 0x00ff0000

populate_ipv6_flow_into_table(const struct rte_hash *h)
        struct rte_eth_dev_info dev_info;
        char srcbuf[INET6_ADDRSTRLEN];
        char dstbuf[INET6_ADDRSTRLEN];

        mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
                                ALL_32_BITS, ALL_32_BITS} };

        mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

        for (i = 0; i < route_num_v6; i++) {
                struct em_rule *entry;
                union ipv6_5tuple_host newkey;

                if ((1 << em_route_base_v6[i].if_out &
                                enabled_port_mask) == 0)

                entry = &em_route_base_v6[i];
                convert_ipv6_5tuple(&(entry->v6_key), &newkey);
                ret = rte_hash_add_key(h, (void *) &newkey);
                        rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
                                " to the l3fwd hash.\n", i);
                ipv6_l3fwd_out_if[ret] = entry->if_out;
                ret = rte_eth_dev_info_get(em_route_base_v6[i].if_out,
                        rte_exit(EXIT_FAILURE,
                                "Error during getting device (port %u) info: %s\n",
                                em_route_base_v6[i].if_out, strerror(-ret));

                printf("EM: Adding route %s, %s, %d, %d, %d (%d) [%s]\n",
                        inet_ntop(AF_INET6, em_route_base_v6[i].v6_key.ip_dst,
                                dstbuf, sizeof(dstbuf)),
                        inet_ntop(AF_INET6, em_route_base_v6[i].v6_key.ip_src,
                                srcbuf, sizeof(srcbuf)),
                        em_route_base_v6[i].v6_key.port_dst,
                        em_route_base_v6[i].v6_key.port_src,
                        em_route_base_v6[i].v6_key.proto,
                        em_route_base_v6[i].if_out, dev_info.device->name);

        printf("Hash: Adding 0x%" PRIx64 " keys\n",
                (uint64_t)route_num_v6);
 * 1. IP packets without extension;
 * 2. L4 payload should be either TCP or UDP.
 */
em_check_ptype(int portid)
        int ptype_l3_ipv4_ext = 0;
        int ptype_l3_ipv6_ext = 0;
        int ptype_l4_tcp = 0;
        int ptype_l4_udp = 0;
        uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

        ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);

        uint32_t ptypes[ret];

        ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
        for (i = 0; i < ret; ++i) {
                case RTE_PTYPE_L3_IPV4_EXT:
                        ptype_l3_ipv4_ext = 1;
                case RTE_PTYPE_L3_IPV6_EXT:
                        ptype_l3_ipv6_ext = 1;
                case RTE_PTYPE_L4_TCP:
                case RTE_PTYPE_L4_UDP:

        if (ptype_l3_ipv4_ext == 0)
                printf("port %d cannot parse RTE_PTYPE_L3_IPV4_EXT\n", portid);
        if (ptype_l3_ipv6_ext == 0)
                printf("port %d cannot parse RTE_PTYPE_L3_IPV6_EXT\n", portid);
        if (!ptype_l3_ipv4_ext || !ptype_l3_ipv6_ext)

        if (ptype_l4_tcp == 0)
                printf("port %d cannot parse RTE_PTYPE_L4_TCP\n", portid);
        if (ptype_l4_udp == 0)
                printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
        if (ptype_l4_tcp && ptype_l4_udp)
em_parse_ptype(struct rte_mbuf *m)
        struct rte_ether_hdr *eth_hdr;
        uint32_t packet_type = RTE_PTYPE_UNKNOWN;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;

        eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        ether_type = eth_hdr->ether_type;
        l3 = (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr);
        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                ipv4_hdr = (struct rte_ipv4_hdr *)l3;
                hdr_len = rte_ipv4_hdr_len(ipv4_hdr);
                if (hdr_len == sizeof(struct rte_ipv4_hdr)) {
                        packet_type |= RTE_PTYPE_L3_IPV4;
                        if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
                                packet_type |= RTE_PTYPE_L4_TCP;
                        else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
                                packet_type |= RTE_PTYPE_L4_UDP;
                        packet_type |= RTE_PTYPE_L3_IPV4_EXT;
        } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
                ipv6_hdr = (struct rte_ipv6_hdr *)l3;
                if (ipv6_hdr->proto == IPPROTO_TCP)
                        packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
                else if (ipv6_hdr->proto == IPPROTO_UDP)
                        packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
                        packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

        m->packet_type = packet_type;
em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
                struct rte_mbuf *pkts[], uint16_t nb_pkts,
                uint16_t max_pkts __rte_unused,
                void *user_param __rte_unused)
        for (i = 0; i < nb_pkts; ++i)
                em_parse_ptype(pkts[i]);
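/*
 * em_cb_parse_ptype() matches the rte_rx_callback_fn prototype. When the PMD
 * cannot classify the needed packet types itself, the port setup code is
 * expected to register it on each RX queue, along the lines of (sketch,
 * port/queue ids assumed):
 *
 *      rte_eth_add_rx_callback(portid, queueid, em_cb_parse_ptype, NULL);
 */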
/* main processing loop */
em_main_loop(__rte_unused void *dummy)
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        uint64_t prev_tsc, diff_tsc, cur_tsc;
        struct lcore_conf *qconf;
        const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
                US_PER_S * BURST_TX_DRAIN_US;
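        /*
         * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles, rounding the
         * cycles-per-microsecond figure up: for example, a 2 GHz TSC and a
         * 100 us drain period give 2000 cycles/us * 100 = 200000 cycles.
         */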
        lcore_id = rte_lcore_id();
        qconf = &lcore_conf[lcore_id];

        const uint16_t n_rx_q = qconf->n_rx_queue;
        const uint16_t n_tx_p = qconf->n_tx_port;
                RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);

        RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

        for (i = 0; i < n_rx_q; i++) {
                portid = qconf->rx_queue_list[i].port_id;
                queueid = qconf->rx_queue_list[i].queue_id;
                        " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
                        lcore_id, portid, queueid);

        cur_tsc = rte_rdtsc();

        while (!force_quit) {

                /*
                 * TX burst queue drain
                 */
                diff_tsc = cur_tsc - prev_tsc;
                if (unlikely(diff_tsc > drain_tsc)) {

                        for (i = 0; i < n_tx_p; ++i) {
                                portid = qconf->tx_port_id[i];
                                if (qconf->tx_mbufs[portid].len == 0)
                                        qconf->tx_mbufs[portid].len,
                                qconf->tx_mbufs[portid].len = 0;

                /*
                 * Read packet from RX queues
                 */
                for (i = 0; i < n_rx_q; ++i) {
                        portid = qconf->rx_queue_list[i].port_id;
                        queueid = qconf->rx_queue_list[i].queue_id;
                        nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,

#if defined RTE_ARCH_X86 || defined __ARM_NEON
                        l3fwd_em_send_packets(nb_rx, pkts_burst,
                        l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst,

                cur_tsc = rte_rdtsc();
static __rte_always_inline void
em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
        const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
        const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
                evt_rsrc->evq.nb_queues - 1];
        const uint8_t event_d_id = evt_rsrc->event_d_id;
        struct lcore_conf *lconf;
        unsigned int lcore_id;

        lcore_id = rte_lcore_id();
        lconf = &lcore_conf[lcore_id];

        RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
        while (!force_quit) {
                if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))

                struct rte_mbuf *mbuf = ev.mbuf;

#if defined RTE_ARCH_X86 || defined __ARM_NEON
                mbuf->port = em_get_dst_port(lconf, mbuf, mbuf->port);
                process_packet(mbuf, &mbuf->port);
#else
                l3fwd_em_simple_process(mbuf, lconf);
#endif
                if (mbuf->port == BAD_PORT) {
                        rte_pktmbuf_free(mbuf);

                if (flags & L3FWD_EVENT_TX_ENQ) {
                        ev.queue_id = tx_q_id;
                        ev.op = RTE_EVENT_OP_FORWARD;
                        while (rte_event_enqueue_burst(event_d_id, event_p_id,
                                        &ev, 1) && !force_quit)

                if (flags & L3FWD_EVENT_TX_DIRECT) {
                        rte_event_eth_tx_adapter_txq_set(mbuf, 0);
                        while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
                                        event_p_id, &ev, 1, 0) &&
static __rte_always_inline void
em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
        const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
        const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
                evt_rsrc->evq.nb_queues - 1];
        const uint8_t event_d_id = evt_rsrc->event_d_id;
        const uint16_t deq_len = evt_rsrc->deq_depth;
        struct rte_event events[MAX_PKT_BURST];
        struct lcore_conf *lconf;
        unsigned int lcore_id;
        int i, nb_enq, nb_deq;

        lcore_id = rte_lcore_id();

        lconf = &lcore_conf[lcore_id];

        RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

        while (!force_quit) {
                /* Read events from RX queues */
                nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,

#if defined RTE_ARCH_X86 || defined __ARM_NEON
                l3fwd_em_process_events(nb_deq, (struct rte_event **)&events,
#else
                l3fwd_em_no_opt_process_events(nb_deq,
                                (struct rte_event **)&events,

                for (i = 0; i < nb_deq; i++) {
                        if (flags & L3FWD_EVENT_TX_ENQ) {
                                events[i].queue_id = tx_q_id;
                                events[i].op = RTE_EVENT_OP_FORWARD;

                        if (flags & L3FWD_EVENT_TX_DIRECT)
                                rte_event_eth_tx_adapter_txq_set(events[i].mbuf,

                if (flags & L3FWD_EVENT_TX_ENQ) {
                        nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
                        while (nb_enq < nb_deq && !force_quit)
                                nb_enq += rte_event_enqueue_burst(event_d_id,
                                        event_p_id, events + nb_enq,

                if (flags & L3FWD_EVENT_TX_DIRECT) {
                        nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
                                event_p_id, events, nb_deq, 0);
                        while (nb_enq < nb_deq && !force_quit)
                                nb_enq += rte_event_eth_tx_adapter_enqueue(
                                        event_d_id, event_p_id,
static __rte_always_inline void
em_event_loop(struct l3fwd_event_resources *evt_rsrc,
        if (flags & L3FWD_EVENT_SINGLE)
                em_event_loop_single(evt_rsrc, flags);
        if (flags & L3FWD_EVENT_BURST)
                em_event_loop_burst(evt_rsrc, flags);

em_event_main_loop_tx_d(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc =
                l3fwd_get_eventdev_rsrc();

        em_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);

em_event_main_loop_tx_d_burst(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc =
                l3fwd_get_eventdev_rsrc();

        em_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);

em_event_main_loop_tx_q(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc =
                l3fwd_get_eventdev_rsrc();

        em_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);

em_event_main_loop_tx_q_burst(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc =
                l3fwd_get_eventdev_rsrc();

        em_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
/* Same eventdev loop for single and burst of vector */
static __rte_always_inline void
em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
        const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
        const uint8_t tx_q_id =
                evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
        const uint8_t event_d_id = evt_rsrc->event_d_id;
        const uint16_t deq_len = evt_rsrc->deq_depth;
        struct rte_event events[MAX_PKT_BURST];
        struct lcore_conf *lconf;
        unsigned int lcore_id;
        int i, nb_enq, nb_deq;

        lcore_id = rte_lcore_id();
        lconf = &lcore_conf[lcore_id];

        RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

        while (!force_quit) {
                /* Read events from RX queues */
                nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,

                for (i = 0; i < nb_deq; i++) {
                        if (flags & L3FWD_EVENT_TX_ENQ) {
                                events[i].queue_id = tx_q_id;
                                events[i].op = RTE_EVENT_OP_FORWARD;

#if defined RTE_ARCH_X86 || defined __ARM_NEON
                        l3fwd_em_process_event_vector(events[i].vec, lconf);
                        l3fwd_em_no_opt_process_event_vector(events[i].vec,
                        if (flags & L3FWD_EVENT_TX_DIRECT)
                                event_vector_txq_set(events[i].vec, 0);

                if (flags & L3FWD_EVENT_TX_ENQ) {
                        nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
                        while (nb_enq < nb_deq && !force_quit)
                                nb_enq += rte_event_enqueue_burst(
                                        event_d_id, event_p_id, events + nb_enq,

                if (flags & L3FWD_EVENT_TX_DIRECT) {
                        nb_enq = rte_event_eth_tx_adapter_enqueue(
                                event_d_id, event_p_id, events, nb_deq, 0);
                        while (nb_enq < nb_deq && !force_quit)
                                nb_enq += rte_event_eth_tx_adapter_enqueue(
                                        event_d_id, event_p_id, events + nb_enq,
em_event_main_loop_tx_d_vector(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);

em_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);

em_event_main_loop_tx_q_vector(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);

em_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
        struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

        em_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
/* Initialize exact match (hash) parameters. 8< */
setup_hash(const int socketid)
        struct rte_hash_parameters ipv4_l3fwd_hash_params = {
                .entries = L3FWD_HASH_ENTRIES,
                .key_len = sizeof(union ipv4_5tuple_host),
                .hash_func = ipv4_hash_crc,
                .hash_func_init_val = 0,
        };

        struct rte_hash_parameters ipv6_l3fwd_hash_params = {
                .entries = L3FWD_HASH_ENTRIES,
                .key_len = sizeof(union ipv6_5tuple_host),
                .hash_func = ipv6_hash_crc,
                .hash_func_init_val = 0,
        };

        /* create ipv4 hash */
        snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
        ipv4_l3fwd_hash_params.name = s;
        ipv4_l3fwd_hash_params.socket_id = socketid;
        ipv4_l3fwd_em_lookup_struct[socketid] =
                rte_hash_create(&ipv4_l3fwd_hash_params);
        if (ipv4_l3fwd_em_lookup_struct[socketid] == NULL)
                rte_exit(EXIT_FAILURE,
                        "Unable to create the l3fwd hash on socket %d\n",

        /* create ipv6 hash */
        snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
        ipv6_l3fwd_hash_params.name = s;
        ipv6_l3fwd_hash_params.socket_id = socketid;
        ipv6_l3fwd_em_lookup_struct[socketid] =
                rte_hash_create(&ipv6_l3fwd_hash_params);
        if (ipv6_l3fwd_em_lookup_struct[socketid] == NULL)
                rte_exit(EXIT_FAILURE,
                        "Unable to create the l3fwd hash on socket %d\n",

        /*
         * Use data from ipv4/ipv6 l3fwd config file
         * directly to initialize the hash table.
         */

        /* populate the ipv4 hash */
        populate_ipv4_flow_into_table(
                ipv4_l3fwd_em_lookup_struct[socketid]);

        /* populate the ipv6 hash */
        populate_ipv6_flow_into_table(
                ipv6_l3fwd_em_lookup_struct[socketid]);
/* >8 End of initialization of hash parameters. */
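/*
 * Usage sketch (how the rest of the l3fwd example is expected to wire this
 * up; the "qconf" lcore context is not defined in this file): setup_hash()
 * runs once per NUMA socket before the forwarding loops start, and each
 * lcore then caches its per-socket lookup handles, e.g.:
 *
 *      setup_hash(socketid);
 *      qconf->ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct(socketid);
 *      qconf->ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct(socketid);
 */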
/* Return ipv4/ipv6 em fwd lookup struct. */
em_get_ipv4_l3fwd_lookup_struct(const int socketid)
        return ipv4_l3fwd_em_lookup_struct[socketid];

em_get_ipv6_l3fwd_lookup_struct(const int socketid)
        return ipv6_l3fwd_em_lookup_struct[socketid];