/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <string.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_vect.h>
#include <rte_hash.h>

#include "l3fwd.h"
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
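
/*
 * Hash function selection: with SSE4.2 the key is hashed using the
 * hardware CRC32 instructions (rte_hash_crc_4byte below); otherwise the
 * portable Jenkins hash (rte_jhash) is used. The per-key helpers
 * ipv4_hash_crc()/ipv6_hash_crc() call these primitives directly.
 */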
#define IPV6_ADDR_LEN 16

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv4_5tuple_host {
	struct {
		uint8_t  pad0;
		uint8_t  proto;
		uint16_t pad1;
		uint32_t ip_src;
		uint32_t ip_dst;
		uint16_t port_src;
		uint16_t port_dst;
	};
	xmm_t xmm;
};

#define XMM_NUM_IN_IPV6_5TUPLE 3

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv6_5tuple_host {
	struct {
		uint16_t pad0;
		uint8_t  proto;
		uint8_t  pad1;
		uint8_t  ip_src[IPV6_ADDR_LEN];
		uint8_t  ip_dst[IPV6_ADDR_LEN];
		uint16_t port_src;
		uint16_t port_dst;
		uint64_t reserve;
	};
	xmm_t xmm[XMM_NUM_IN_IPV6_5TUPLE];
};
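
/*
 * The *_host unions overlay the 5-tuple on one (IPv4) or three (IPv6)
 * xmm_t vector words. The pad fields keep the layout identical to the
 * packet bytes starting at the TTL (IPv4) / payload length (IPv6) field,
 * so the data path can build a lookup key with masked vector loads; see
 * em_mask_key() and the mask0/mask1/mask2 constants below.
 */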
struct ipv4_l3fwd_em_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_em_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};
static struct ipv4_l3fwd_em_route ipv4_l3fwd_em_route_array[] = {
	{{IPv4(101, 0, 0, 0), IPv4(100, 10, 0, 1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(201, 0, 0, 0), IPv4(200, 20, 0, 1), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(111, 0, 0, 0), IPv4(100, 30, 0, 1), 101, 11, IPPROTO_TCP}, 2},
	{{IPv4(211, 0, 0, 0), IPv4(200, 40, 0, 1), 102, 12, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_em_route ipv6_l3fwd_em_route_array[] = {
	{{
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 0},

	{{
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 1},

	{{
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 2},

	{{
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 3},
};
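
/*
 * Four sample TCP flows per address family, forwarding to ports 0-3.
 * The IPv6 prefixes above (0xfe80..0xfeb0) all fall inside the fe80::/10
 * link-local range, with EUI-64 style interface identifiers.
 */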
struct rte_hash *ipv4_l3fwd_em_lookup_struct[NB_SOCKETS];
struct rte_hash *ipv6_l3fwd_em_lookup_struct[NB_SOCKETS];
static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv4_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash_1word(k->ip_src, init_val);
	init_val = rte_jhash_1word(k->ip_dst, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}
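
/*
 * This function is plugged into rte_hash as .hash_func in setup_hash()
 * below; data_len is unused because the key width is fixed by .key_len.
 * A minimal sketch of a standalone call (hypothetical key contents):
 *
 *	union ipv4_5tuple_host key = { ... };
 *	uint32_t sig = ipv4_hash_crc(&key, sizeof(key), 0);
 */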
static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv6_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	ip_src0 = (const uint32_t *) k->ip_src;
	ip_src1 = (const uint32_t *)(k->ip_src+4);
	ip_src2 = (const uint32_t *)(k->ip_src+8);
	ip_src3 = (const uint32_t *)(k->ip_src+12);
	ip_dst0 = (const uint32_t *) k->ip_dst;
	ip_dst1 = (const uint32_t *)(k->ip_dst+4);
	ip_dst2 = (const uint32_t *)(k->ip_dst+8);
	ip_dst3 = (const uint32_t *)(k->ip_dst+12);
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(*ip_src0, init_val);
	init_val = rte_hash_crc_4byte(*ip_src1, init_val);
	init_val = rte_hash_crc_4byte(*ip_src2, init_val);
	init_val = rte_hash_crc_4byte(*ip_src3, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash(k->ip_src,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash(k->ip_dst,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}
#define IPV4_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_em_route_array) / sizeof(ipv4_l3fwd_em_route_array[0]))

#define IPV6_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_em_route_array) / sizeof(ipv6_l3fwd_em_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

static rte_xmm_t mask0;
static rte_xmm_t mask1;
static rte_xmm_t mask2;
#if defined(__SSE2__)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	__m128i data = _mm_loadu_si128((__m128i *)(key));

	return _mm_and_si128(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	int32x4_t data = vld1q_s32((int32_t *)key);

	return vandq_s32(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_ALTIVEC)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	xmm_t data = vec_ld(0, (xmm_t *)(key));

	return vec_and(data, mask);
}
#else
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
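
/*
 * em_mask_key() returns the 16 bytes at 'key' ANDed with 'mask', using
 * whichever 128-bit vector ISA the target provides. The masks zero the
 * bytes that are not part of the 5-tuple (e.g. TTL and header checksum
 * for IPv4) so the raw packet bytes can be used as a hash key directly.
 */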
static inline uint8_t
em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv4_5tuple_host key;
	struct rte_hash *ipv4_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);

	/*
	 * Get 5 tuple: dst port, src port, dst IP address,
	 * src IP address and protocol.
	 */
	key.xmm = em_mask_key(ipv4_hdr, mask0.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
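
/*
 * Note the miss policy above: when rte_hash_lookup() returns a negative
 * value the packet is echoed back out of the port it arrived on (portid)
 * rather than dropped; a hit returns the pre-resolved output port from
 * ipv4_l3fwd_out_if[].
 */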
static inline uint8_t
em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv6_5tuple_host key;
	struct rte_hash *ipv6_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
	void *data0 = ipv6_hdr;
	void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
	void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);

	/* Get part of 5 tuple: src IP address lower 96 bits and protocol */
	key.xmm[0] = em_mask_key(data0, mask1.x);

	/*
	 * Get part of 5 tuple: dst IP address lower 96 bits
	 * and src IP address higher 32 bits.
	 */
	key.xmm[1] = *(xmm_t *)data1;

	/*
	 * Get part of 5 tuple: dst port and src port
	 * and dst IP address higher 32 bits.
	 */
	key.xmm[2] = em_mask_key(data2, mask2.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#if defined(__SSE4_1__)
#if defined(NO_HASH_MULTI_LOOKUP)
#include "l3fwd_em_sse.h"
#else
#include "l3fwd_em_hlm_sse.h"
#endif
#else
#include "l3fwd_em.h"
#endif
static void
convert_ipv4_5tuple(struct ipv4_5tuple *key1,
		union ipv4_5tuple_host *key2)
{
	key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
	key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
}
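
/*
 * Route entries are stored in host byte order, but the data path reads
 * keys straight out of the packet, so the convert_*() helpers swap to
 * network byte order (and clear the pad bytes, which em_mask_key()
 * zeroes on the lookup side) before insertion.
 */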
static void
convert_ipv6_5tuple(struct ipv6_5tuple *key1,
		union ipv6_5tuple_host *key2)
{
	uint32_t i;

	for (i = 0; i < 16; i++) {
		key2->ip_dst[i] = key1->ip_dst[i];
		key2->ip_src[i] = key1->ip_src[i];
	}
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
	key2->reserve = 0;
}
#define BYTE_VALUE_MAX 256
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00
static inline void
populate_ipv4_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < IPV4_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		entry = ipv4_l3fwd_em_route_array[i];
		convert_ipv4_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		}
		ipv4_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV4_L3FWD_EM_NUM_ROUTES);
}
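
/*
 * rte_hash_add_key() returns the slot index of the inserted key; the
 * populate functions reuse that index into ipv4/ipv6_l3fwd_out_if[] to
 * record the output port, which is what the lookup path reads back.
 */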
#define BIT_16_TO_23 0x00ff0000
static inline void
populate_ipv6_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < IPV6_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		entry = ipv6_l3fwd_em_route_array[i];
		convert_ipv6_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		}
		ipv6_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV6_L3FWD_EM_NUM_ROUTES);
}
#define NUMBER_PORT_USED 4
static inline void
populate_ipv4_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv4 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv4_l3fwd_em_route_array[0];
			entry.key.ip_dst = IPv4(101, c, b, a);
			break;
		case 1:
			entry = ipv4_l3fwd_em_route_array[1];
			entry.key.ip_dst = IPv4(201, c, b, a);
			break;
		case 2:
			entry = ipv4_l3fwd_em_route_array[2];
			entry.key.ip_dst = IPv4(111, c, b, a);
			break;
		case 3:
			entry = ipv4_l3fwd_em_route_array[3];
			entry.key.ip_dst = IPv4(211, c, b, a);
			break;
		}
		convert_ipv4_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv4_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
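
/*
 * The "many flow" generator synthesizes nr_flow distinct 5-tuples for
 * scale testing: the low two bits of i pick one of the four template
 * routes, and the a/b/c octets (i / NUMBER_PORT_USED in base 256) are
 * spliced into the destination address.
 */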
static inline void
populate_ipv6_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };
	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv6 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv6_l3fwd_em_route_array[0];
			break;
		case 1:
			entry = ipv6_l3fwd_em_route_array[1];
			break;
		case 2:
			entry = ipv6_l3fwd_em_route_array[2];
			break;
		case 3:
			entry = ipv6_l3fwd_em_route_array[3];
			break;
		}
		entry.key.ip_dst[13] = c;
		entry.key.ip_dst[14] = b;
		entry.key.ip_dst[15] = a;
		convert_ipv6_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv6_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
/* Requirements:
 * 1. IP packets without extension;
 * 2. L4 payload should be either TCP or UDP.
 */
int
em_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4_ext = 0;
	int ptype_l3_ipv6_ext = 0;
	int ptype_l4_tcp = 0;
	int ptype_l4_udp = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		switch (ptypes[i]) {
		case RTE_PTYPE_L3_IPV4_EXT:
			ptype_l3_ipv4_ext = 1;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			ptype_l3_ipv6_ext = 1;
			break;
		case RTE_PTYPE_L4_TCP:
			ptype_l4_tcp = 1;
			break;
		case RTE_PTYPE_L4_UDP:
			ptype_l4_udp = 1;
			break;
		}
	}

	if (ptype_l3_ipv4_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4_EXT\n", portid);
	if (ptype_l3_ipv6_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6_EXT\n", portid);
	if (!ptype_l3_ipv4_ext || !ptype_l3_ipv6_ext)
		return 0;

	if (ptype_l4_tcp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_TCP\n", portid);
	if (ptype_l4_udp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
	if (ptype_l4_tcp && ptype_l4_udp)
		return 1;

	return 0;
}
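
/*
 * em_check_ptype() reports whether the NIC can classify IPv4/IPv6
 * extension headers and TCP/UDP in hardware. When it returns 0, the
 * application is expected to fall back to software classification by
 * registering em_cb_parse_ptype() below as an RX callback (e.g. with
 * rte_eth_add_rx_callback()).
 */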
static inline void
em_parse_ptype(struct rte_mbuf *m)
{
	struct ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;
	void *l3;
	int hdr_len;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_type = eth_hdr->ether_type;
	l3 = (uint8_t *)eth_hdr + sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_hdr = (struct ipv4_hdr *)l3;
		hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
			IPV4_IHL_MULTIPLIER;
		if (hdr_len == sizeof(struct ipv4_hdr)) {
			packet_type |= RTE_PTYPE_L3_IPV4;
			if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
				packet_type |= RTE_PTYPE_L4_TCP;
			else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
				packet_type |= RTE_PTYPE_L4_UDP;
		} else
			packet_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		ipv6_hdr = (struct ipv6_hdr *)l3;
		if (ipv6_hdr->proto == IPPROTO_TCP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		else if (ipv6_hdr->proto == IPPROTO_UDP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		else
			packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	}

	m->packet_type = packet_type;
}
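
/*
 * Software classifier notes: only plain Ethernet framing is examined
 * (no VLAN), and an IPv4 header longer than the fixed 20 bytes is
 * flagged RTE_PTYPE_L3_IPV4_EXT so the exact-match fast path can skip
 * packets carrying IP options.
 */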
uint16_t
em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
		struct rte_mbuf *pkts[], uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_param __rte_unused)
{
	unsigned i;

	for (i = 0; i < nb_pkts; ++i)
		em_parse_ptype(pkts[i]);

	return nb_pkts;
}
/* main processing loop */
int
em_main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined(__SSE4_1__)
			l3fwd_em_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#else
			l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* __SSE4_1__ */
		}
	}

	return 0;
}
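
/*
 * The TX drain logic above bounds latency: roughly every
 * BURST_TX_DRAIN_US microseconds (drain_tsc TSC cycles), any partially
 * filled bursts sitting in qconf->tx_mbufs are flushed with send_burst()
 * even if MAX_PKT_BURST has not been reached.
 */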
/*
 * Initialize exact match (hash) parameters.
 */
void
setup_hash(const int socketid)
{
	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv4_5tuple_host),
		.hash_func = ipv4_hash_crc,
		.hash_func_init_val = 0,
	};

	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv6_5tuple_host),
		.hash_func = ipv6_hash_crc,
		.hash_func_init_val = 0,
	};

	char s[64];

	/* create ipv4 hash */
	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	/* create ipv6 hash */
	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) {
		/* For testing hash matching with a large number of flows we
		 * generate millions of IP 5-tuples with an incremented dst
		 * address to initialize the hash table. */
		if (ipv6 == 0) {
			/* populate the ipv4 hash */
			populate_ipv4_many_flow_into_table(
				ipv4_l3fwd_em_lookup_struct[socketid],
				hash_entry_number);
		} else {
			/* populate the ipv6 hash */
			populate_ipv6_many_flow_into_table(
				ipv6_l3fwd_em_lookup_struct[socketid],
				hash_entry_number);
		}
	} else {
		/*
		 * Use data in ipv4/ipv6 l3fwd lookup table
		 * directly to initialize the hash table.
		 */
		if (ipv6 == 0) {
			/* populate the ipv4 hash */
			populate_ipv4_few_flow_into_table(
				ipv4_l3fwd_em_lookup_struct[socketid]);
		} else {
			/* populate the ipv6 hash */
			populate_ipv6_few_flow_into_table(
				ipv6_l3fwd_em_lookup_struct[socketid]);
		}
	}
}
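
/*
 * One hash table per NUMA socket keeps lookups in socket-local memory.
 * The tables are keyed by the full *_5tuple_host unions, i.e. exactly
 * the masked, network-byte-order representation the data path builds in
 * em_get_ipv4_dst_port()/em_get_ipv6_dst_port().
 */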
/* Return ipv4/ipv6 em fwd lookup struct. */
void *
em_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_em_lookup_struct[socketid];
}

void *
em_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_em_lookup_struct[socketid];
}