/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_vect.h>
#include <rte_hash.h>

#include "l3fwd.h"

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
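
/*
 * Exact-match keys. The packed 5-tuple structs below hold route-table
 * entries; the *_host unions mirror the packet bytes as they appear on
 * the wire (starting at the IPv4 TTL / IPv6 payload-length field), padded
 * to whole vector registers so a lookup key can be built with masked
 * vector loads: one xmm_t for IPv4, three for IPv6.
 */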
#define IPV6_ADDR_LEN 16

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv4_5tuple_host {
	struct {
		uint8_t  pad0;
		uint8_t  proto;
		uint16_t pad1;
		uint32_t ip_src;
		uint32_t ip_dst;
		uint16_t port_src;
		uint16_t port_dst;
	};
	xmm_t xmm;
};

#define XMM_NUM_IN_IPV6_5TUPLE 3

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv6_5tuple_host {
	struct {
		uint16_t pad0;
		uint8_t  proto;
		uint8_t  pad1;
		uint8_t  ip_src[IPV6_ADDR_LEN];
		uint8_t  ip_dst[IPV6_ADDR_LEN];
		uint16_t port_src;
		uint16_t port_dst;
		uint64_t reserve;
	};
	xmm_t xmm[XMM_NUM_IN_IPV6_5TUPLE];
};

struct ipv4_l3fwd_em_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_em_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};
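
/*
 * Default flow table: four TCP flows per IP version; if_out is the
 * output interface used when a packet matches the 5-tuple.
 */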
static struct ipv4_l3fwd_em_route ipv4_l3fwd_em_route_array[] = {
	{{IPv4(101, 0, 0, 0), IPv4(100, 10, 0, 1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(201, 0, 0, 0), IPv4(200, 20, 0, 1), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(111, 0, 0, 0), IPv4(100, 30, 0, 1), 101, 11, IPPROTO_TCP}, 2},
	{{IPv4(211, 0, 0, 0), IPv4(200, 40, 0, 1), 102, 12, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_em_route ipv6_l3fwd_em_route_array[] = {
	{{
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 0},

	{{
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 1},

	{{
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 2},

	{{
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 3},
};

struct rte_hash *ipv4_l3fwd_em_lookup_struct[NB_SOCKETS];
struct rte_hash *ipv6_l3fwd_em_lookup_struct[NB_SOCKETS];
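
/*
 * Hash callbacks passed to rte_hash_create(). With SSE4.2 the key is
 * folded four bytes at a time through the CRC32 instruction; otherwise
 * Jenkins hash is used. Only the 5-tuple fields are hashed, never the
 * padding bytes.
 */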
static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv4_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash_1word(k->ip_src, init_val);
	init_val = rte_jhash_1word(k->ip_dst, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}
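
/*
 * IPv6 variant: in the CRC path the two 128-bit addresses are folded one
 * 32-bit word at a time; the jhash path hashes each address in bulk.
 */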
static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv6_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	ip_src0 = (const uint32_t *) k->ip_src;
	ip_src1 = (const uint32_t *)(k->ip_src+4);
	ip_src2 = (const uint32_t *)(k->ip_src+8);
	ip_src3 = (const uint32_t *)(k->ip_src+12);
	ip_dst0 = (const uint32_t *) k->ip_dst;
	ip_dst1 = (const uint32_t *)(k->ip_dst+4);
	ip_dst2 = (const uint32_t *)(k->ip_dst+8);
	ip_dst3 = (const uint32_t *)(k->ip_dst+12);
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(*ip_src0, init_val);
	init_val = rte_hash_crc_4byte(*ip_src1, init_val);
	init_val = rte_hash_crc_4byte(*ip_src2, init_val);
	init_val = rte_hash_crc_4byte(*ip_src3, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash(k->ip_src,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash(k->ip_dst,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}

#define IPV4_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_em_route_array) / sizeof(ipv4_l3fwd_em_route_array[0]))

#define IPV6_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_em_route_array) / sizeof(ipv6_l3fwd_em_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

static rte_xmm_t mask0;
static rte_xmm_t mask1;
static rte_xmm_t mask2;
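
/*
 * Masks applied to freshly loaded key registers: mask0 keeps only the
 * protocol byte in the first word of an IPv4 key; mask1 does the same for
 * the first xmm word of an IPv6 key, and mask2 clears the trailing
 * reserve bytes in its last word. All three are initialized by the
 * populate_* helpers below.
 */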
#if defined(__SSE2__)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	__m128i data = _mm_loadu_si128((__m128i *)(key));

	return _mm_and_si128(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	int32x4_t data = vld1q_s32((int32_t *)key);

	return vandq_s32(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_ALTIVEC)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	xmm_t data = vec_ld(0, (xmm_t *)(key));

	return vec_and(data, mask);
}
#else
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
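
/*
 * Per-packet lookup: load the 5-tuple directly from the packet header,
 * mask off the bytes that are not part of the key and probe the hash.
 * On a miss the packet is returned to the port it arrived on.
 */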
static inline uint8_t
em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv4_5tuple_host key;
	struct rte_hash *ipv4_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);

	/*
	 * Get 5 tuple: dst port, src port, dst IP address,
	 * src IP address and protocol.
	 */
	key.xmm = em_mask_key(ipv4_hdr, mask0.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
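
/*
 * IPv6 lookup: the key spans three xmm words. Only the first and last
 * words need masking; every byte of the middle word belongs to the key,
 * so it is copied as-is.
 */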
static inline uint8_t
em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv6_5tuple_host key;
	struct rte_hash *ipv6_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
	void *data0 = ipv6_hdr;
	void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
	void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);

	/* Get part of 5 tuple: src IP address lower 96 bits and protocol */
	key.xmm[0] = em_mask_key(data0, mask1.x);

	/*
	 * Get part of 5 tuple: dst IP address lower 96 bits
	 * and src IP address higher 32 bits.
	 */
	key.xmm[1] = *(xmm_t *)data1;

	/*
	 * Get part of 5 tuple: dst port and src port
	 * and dst IP address higher 32 bits.
	 */
	key.xmm[2] = em_mask_key(data2, mask2.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}

#if defined(__SSE4_1__)
#if defined(NO_HASH_MULTI_LOOKUP)
#include "l3fwd_em_sse.h"
#else
#include "l3fwd_em_hlm_sse.h"
#endif
#else
#include "l3fwd_em.h"
#endif
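
/*
 * Convert a route-table key (host byte order) to the padded,
 * network-byte-order union layout used by the hash functions and the
 * masked packet loads above.
 */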
static void
convert_ipv4_5tuple(struct ipv4_5tuple *key1,
		union ipv4_5tuple_host *key2)
{
	key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
	key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
}

static void
convert_ipv6_5tuple(struct ipv6_5tuple *key1,
		union ipv6_5tuple_host *key2)
{
	uint32_t i;

	for (i = 0; i < 16; i++) {
		key2->ip_dst[i] = key1->ip_dst[i];
		key2->ip_src[i] = key1->ip_src[i];
	}
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
	key2->reserve = 0;
}
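
/*
 * Mask building blocks: BIT_8_TO_15 keeps only the protocol byte in the
 * first 32-bit word of an IPv4 key; the TTL and checksum bytes around it
 * are padding and must not take part in the match.
 */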
#define BYTE_VALUE_MAX 256
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00

static inline void
populate_ipv4_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < IPV4_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		entry = ipv4_l3fwd_em_route_array[i];
		convert_ipv4_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);

		ipv4_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV4_L3FWD_EM_NUM_ROUTES);
}

#define BIT_16_TO_23 0x00ff0000
static inline void
populate_ipv6_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < IPV6_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		entry = ipv6_l3fwd_em_route_array[i];
		convert_ipv6_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);

		ipv6_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV6_L3FWD_EM_NUM_ROUTES);
}
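
/*
 * The many-flow variants below stress the hash with nr_flow synthetic
 * entries: each entry reuses one of the four base routes (picked
 * round-robin from the two low bits of the index) and encodes the rest
 * of the index in the low bytes of the destination address.
 */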
#define NUMBER_PORT_USED 4
static inline void
populate_ipv4_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv4 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv4_l3fwd_em_route_array[0];
			entry.key.ip_dst = IPv4(101, c, b, a);
			break;
		case 1:
			entry = ipv4_l3fwd_em_route_array[1];
			entry.key.ip_dst = IPv4(201, c, b, a);
			break;
		case 2:
			entry = ipv4_l3fwd_em_route_array[2];
			entry.key.ip_dst = IPv4(111, c, b, a);
			break;
		case 3:
			entry = ipv4_l3fwd_em_route_array[3];
			entry.key.ip_dst = IPv4(211, c, b, a);
			break;
		};
		convert_ipv4_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv4_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}

static inline void
populate_ipv6_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };
	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv6 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv6_l3fwd_em_route_array[0];
			break;
		case 1:
			entry = ipv6_l3fwd_em_route_array[1];
			break;
		case 2:
			entry = ipv6_l3fwd_em_route_array[2];
			break;
		case 3:
			entry = ipv6_l3fwd_em_route_array[3];
			break;
		};
		entry.key.ip_dst[13] = c;
		entry.key.ip_dst[14] = b;
		entry.key.ip_dst[15] = a;
		convert_ipv6_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv6_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
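
/*
 * Hardware ptype check: ask the port which packet types its driver can
 * classify. If the required L3/L4 types are not all supported, the
 * application is expected to fall back to the software parser below,
 * registered as an RX callback.
 */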
/* Requirements:
 * 1. IP packets without extension;
 * 2. L4 payload should be either TCP or UDP.
 */
static int
em_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4_ext = 0;
	int ptype_l3_ipv6_ext = 0;
	int ptype_l4_tcp = 0;
	int ptype_l4_udp = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		switch (ptypes[i]) {
		case RTE_PTYPE_L3_IPV4_EXT:
			ptype_l3_ipv4_ext = 1;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			ptype_l3_ipv6_ext = 1;
			break;
		case RTE_PTYPE_L4_TCP:
			ptype_l4_tcp = 1;
			break;
		case RTE_PTYPE_L4_UDP:
			ptype_l4_udp = 1;
			break;
		}
	}

	if (ptype_l3_ipv4_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4_EXT\n", portid);
	if (ptype_l3_ipv6_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6_EXT\n", portid);
	if (!ptype_l3_ipv4_ext || !ptype_l3_ipv6_ext)
		return 0;

	if (ptype_l4_tcp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_TCP\n", portid);
	if (ptype_l4_udp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
	if (ptype_l4_tcp && ptype_l4_udp)
		return 1;

	return 0;
}
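
/*
 * Software fallback: derive packet_type from the Ethernet, IP and L4
 * headers. IPv4 headers longer than the 20-byte minimum (i.e. carrying
 * options) are marked as _EXT instead of _IPV4.
 */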
static inline void
em_parse_ptype(struct rte_mbuf *m)
{
	struct ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;
	void *l3;
	int hdr_len;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_type = eth_hdr->ether_type;
	l3 = (uint8_t *)eth_hdr + sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_hdr = (struct ipv4_hdr *)l3;
		hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
			  IPV4_IHL_MULTIPLIER;
		if (hdr_len == sizeof(struct ipv4_hdr)) {
			packet_type |= RTE_PTYPE_L3_IPV4;
			if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
				packet_type |= RTE_PTYPE_L4_TCP;
			else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
				packet_type |= RTE_PTYPE_L4_UDP;
		} else
			packet_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		ipv6_hdr = (struct ipv6_hdr *)l3;
		if (ipv6_hdr->proto == IPPROTO_TCP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		else if (ipv6_hdr->proto == IPPROTO_UDP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		else
			packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	}

	m->packet_type = packet_type;
}

static uint16_t
em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
		  struct rte_mbuf *pkts[], uint16_t nb_pkts,
		  uint16_t max_pkts __rte_unused,
		  void *user_param __rte_unused)
{
	unsigned i;

	for (i = 0; i < nb_pkts; ++i)
		em_parse_ptype(pkts[i]);

	return nb_pkts;
}
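
/*
 * The main loop below alternates between draining buffered TX packets
 * (at most every BURST_TX_DRAIN_US microseconds, converted to TSC cycles
 * in drain_tsc) and polling each RX queue assigned to this lcore.
 */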
/* main processing loop */
int
em_main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined(__SSE4_1__)
			l3fwd_em_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#else
			l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* __SSE4_1__ */
		}
	}

	return 0;
}

/*
 * Initialize exact match (hash) parameters.
 */
void
setup_hash(const int socketid)
{
	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv4_5tuple_host),
		.hash_func = ipv4_hash_crc,
		.hash_func_init_val = 0,
	};

	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv6_5tuple_host),
		.hash_func = ipv6_hash_crc,
		.hash_func_init_val = 0,
	};

	char s[64];

	/* create ipv4 hash */
	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	/* create ipv6 hash */
	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) {
		/* For testing hash matching with a large number of flows we
		 * generate millions of IP 5-tuples with an incremented dst
		 * address to initialize the hash table. */
		if (ipv6 == 0) {
			/* populate the ipv4 hash */
			populate_ipv4_many_flow_into_table(
				ipv4_l3fwd_em_lookup_struct[socketid],
				hash_entry_number);
		} else {
			/* populate the ipv6 hash */
			populate_ipv6_many_flow_into_table(
				ipv6_l3fwd_em_lookup_struct[socketid],
				hash_entry_number);
		}
	} else {
		/*
		 * Use data in ipv4/ipv6 l3fwd lookup table
		 * directly to initialize the hash table.
		 */
		if (ipv6 == 0) {
			/* populate the ipv4 hash */
			populate_ipv4_few_flow_into_table(
				ipv4_l3fwd_em_lookup_struct[socketid]);
		} else {
			/* populate the ipv6 hash */
			populate_ipv6_few_flow_into_table(
				ipv6_l3fwd_em_lookup_struct[socketid]);
		}
	}
}

/* Return ipv4/ipv6 em fwd lookup struct. */
void *
em_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_em_lookup_struct[socketid];
}

void *
em_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_em_lookup_struct[socketid];
}