/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <netinet/in.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_hash.h>

#include "l3fwd.h"

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
#define IPV6_ADDR_LEN 16

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv4_5tuple_host {
	struct {
		uint8_t  pad0;
		uint8_t  proto;
		uint16_t pad1;
		uint32_t ip_src;
		uint32_t ip_dst;
		uint16_t port_src;
		uint16_t port_dst;
	};
	xmm_t xmm;
};

#define XMM_NUM_IN_IPV6_5TUPLE 3

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv6_5tuple_host {
	struct {
		uint16_t pad0;
		uint8_t  proto;
		uint8_t  pad1;
		uint8_t  ip_src[IPV6_ADDR_LEN];
		uint8_t  ip_dst[IPV6_ADDR_LEN];
		uint16_t port_src;
		uint16_t port_dst;
		uint64_t reserve;
	};
	xmm_t xmm[XMM_NUM_IN_IPV6_5TUPLE];
};
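
/*
 * Illustrative sanity checks (added; not in the upstream file). The
 * masked-key lookups below assume the IPv4 key fills exactly one vector
 * register and the IPv6 key exactly three. Assumes a C11 toolchain for
 * _Static_assert.
 */
_Static_assert(sizeof(union ipv4_5tuple_host) == sizeof(xmm_t),
	"IPv4 5-tuple key must fill a single xmm register");
_Static_assert(sizeof(union ipv6_5tuple_host) ==
		XMM_NUM_IN_IPV6_5TUPLE * sizeof(xmm_t),
	"IPv6 5-tuple key must span exactly three xmm registers");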
struct ipv4_l3fwd_em_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_em_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};
static struct ipv4_l3fwd_em_route ipv4_l3fwd_em_route_array[] = {
	{{IPv4(101, 0, 0, 0), IPv4(100, 10, 0, 1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(201, 0, 0, 0), IPv4(200, 20, 0, 1), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(111, 0, 0, 0), IPv4(100, 30, 0, 1), 101, 11, IPPROTO_TCP}, 2},
	{{IPv4(211, 0, 0, 0), IPv4(200, 40, 0, 1), 102, 12, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_em_route ipv6_l3fwd_em_route_array[] = {
	{{
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 0},

	{{
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 1},

	{{
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 2},

	{{
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 3},
};
struct rte_hash *ipv4_l3fwd_em_lookup_struct[NB_SOCKETS];
struct rte_hash *ipv6_l3fwd_em_lookup_struct[NB_SOCKETS];
static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv4_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash_1word(k->ip_src, init_val);
	init_val = rte_jhash_1word(k->ip_dst, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}
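
/*
 * Added note (not in the upstream source): port_src and port_dst are
 * adjacent 16-bit fields in the host key, so the single 32-bit read
 * through `p` above feeds both ports to the hash in one
 * rte_hash_crc_4byte()/rte_jhash_1word() round instead of two.
 */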
static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv6_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	ip_src0 = (const uint32_t *) k->ip_src;
	ip_src1 = (const uint32_t *)(k->ip_src + 4);
	ip_src2 = (const uint32_t *)(k->ip_src + 8);
	ip_src3 = (const uint32_t *)(k->ip_src + 12);
	ip_dst0 = (const uint32_t *) k->ip_dst;
	ip_dst1 = (const uint32_t *)(k->ip_dst + 4);
	ip_dst2 = (const uint32_t *)(k->ip_dst + 8);
	ip_dst3 = (const uint32_t *)(k->ip_dst + 12);
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(*ip_src0, init_val);
	init_val = rte_hash_crc_4byte(*ip_src1, init_val);
	init_val = rte_hash_crc_4byte(*ip_src2, init_val);
	init_val = rte_hash_crc_4byte(*ip_src3, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash(k->ip_src,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash(k->ip_dst,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}
#define IPV4_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_em_route_array) / sizeof(ipv4_l3fwd_em_route_array[0]))

#define IPV6_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_em_route_array) / sizeof(ipv6_l3fwd_em_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

static rte_xmm_t mask0;
static rte_xmm_t mask1;
static rte_xmm_t mask2;
#if defined(__SSE2__)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	__m128i data = _mm_loadu_si128((__m128i *)(key));

	return _mm_and_si128(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	int32x4_t data = vld1q_s32((int32_t *)key);

	return vandq_s32(data, mask);
}
#else
#error No vector engine (SSE, NEON) available, check your toolchain
#endif
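
/*
 * Added note (not in the upstream source): em_mask_key() loads 16 bytes
 * of packet header and ANDs them with a mask so that only genuine
 * 5-tuple bytes survive. For IPv4 (mask0), the load starts at
 * time_to_live, so the masked layout is:
 *
 *	byte 0      : TTL            -> zeroed (pad0)
 *	byte 1      : next_proto_id  -> kept   (proto)
 *	bytes 2-3   : hdr_checksum   -> zeroed (pad1)
 *	bytes 4-11  : src/dst IP     -> kept
 *	bytes 12-15 : src/dst port   -> kept
 */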
static inline uint8_t
em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv4_5tuple_host key;
	struct rte_hash *ipv4_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);

	/*
	 * Get 5 tuple: dst port, src port, dst IP address,
	 * src IP address and protocol.
	 */
	key.xmm = em_mask_key(ipv4_hdr, mask0.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
static inline uint8_t
em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv6_5tuple_host key;
	struct rte_hash *ipv6_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
	void *data0 = ipv6_hdr;
	void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
	void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);

	/* Get part of 5 tuple: src IP address lower 96 bits and protocol */
	key.xmm[0] = em_mask_key(data0, mask1.x);

	/*
	 * Get part of 5 tuple: dst IP address lower 96 bits
	 * and src IP address higher 32 bits.
	 */
	key.xmm[1] = *(xmm_t *)data1;

	/*
	 * Get part of 5 tuple: dst port and src port
	 * and dst IP address higher 32 bits.
	 */
	key.xmm[2] = em_mask_key(data2, mask2.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
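
/*
 * Added note (not in the upstream source): of the three 16-byte chunks
 * read above, only the first and last need masking. key.xmm[1] is copied
 * verbatim because every byte of that middle chunk (the upper 32 bits of
 * the source address plus the lower 96 bits of the destination address)
 * belongs to the 5-tuple.
 */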
#if defined(__SSE4_1__)
#if defined(NO_HASH_MULTI_LOOKUP)
#include "l3fwd_em_sse.h"
#else
#include "l3fwd_em_hlm_sse.h"
#endif
#else
#include "l3fwd_em.h"
#endif
static void
convert_ipv4_5tuple(struct ipv4_5tuple *key1,
		union ipv4_5tuple_host *key2)
{
	key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
	key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	/* Zero the pad bytes: the whole union is hashed and compared. */
	key2->pad0 = 0;
	key2->pad1 = 0;
}

static void
convert_ipv6_5tuple(struct ipv6_5tuple *key1,
		union ipv6_5tuple_host *key2)
{
	uint32_t i;

	for (i = 0; i < 16; i++) {
		key2->ip_dst[i] = key1->ip_dst[i];
		key2->ip_src[i] = key1->ip_src[i];
	}
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	/* Zero the pad and reserve bytes for the same reason as above. */
	key2->pad0 = 0;
	key2->pad1 = 0;
	key2->reserve = 0;
}
#define BYTE_VALUE_MAX 256
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00

static inline void
populate_ipv4_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < IPV4_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		entry = ipv4_l3fwd_em_route_array[i];
		convert_ipv4_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		}
		ipv4_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV4_L3FWD_EM_NUM_ROUTES);
}
#define BIT_16_TO_23 0x00ff0000
static inline void
populate_ipv6_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < IPV6_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		entry = ipv6_l3fwd_em_route_array[i];
		convert_ipv6_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		}
		ipv6_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV6_L3FWD_EM_NUM_ROUTES);
}
#define NUMBER_PORT_USED 4
static inline void
populate_ipv4_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv4 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv4_l3fwd_em_route_array[0];
			entry.key.ip_dst = IPv4(101, c, b, a);
			break;
		case 1:
			entry = ipv4_l3fwd_em_route_array[1];
			entry.key.ip_dst = IPv4(201, c, b, a);
			break;
		case 2:
			entry = ipv4_l3fwd_em_route_array[2];
			entry.key.ip_dst = IPv4(111, c, b, a);
			break;
		case 3:
			entry = ipv4_l3fwd_em_route_array[3];
			entry.key.ip_dst = IPv4(211, c, b, a);
			break;
		}
		convert_ipv4_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv4_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
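
/*
 * Worked example (added for clarity; not in the upstream source):
 * for i = 1027, i & 3 = 3 selects route_array[3], and i/4 = 256 gives
 * a = 256 % 256 = 0, b = (256/256) % 256 = 1, c = 256/65536 = 0,
 * so the generated key's destination address is IPv4(211, 0, 1, 0).
 */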
static inline void
populate_ipv6_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };
	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv6 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv6_l3fwd_em_route_array[0];
			break;
		case 1:
			entry = ipv6_l3fwd_em_route_array[1];
			break;
		case 2:
			entry = ipv6_l3fwd_em_route_array[2];
			break;
		case 3:
			entry = ipv6_l3fwd_em_route_array[3];
			break;
		}
		entry.key.ip_dst[13] = c;
		entry.key.ip_dst[14] = b;
		entry.key.ip_dst[15] = a;
		convert_ipv6_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv6_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
/* Requirements:
 * 1. IP packets without extension;
 * 2. L4 payload should be either TCP or UDP.
 */
int
em_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4_ext = 0;
	int ptype_l3_ipv6_ext = 0;
	int ptype_l4_tcp = 0;
	int ptype_l4_udp = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		switch (ptypes[i]) {
		case RTE_PTYPE_L3_IPV4_EXT:
			ptype_l3_ipv4_ext = 1;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			ptype_l3_ipv6_ext = 1;
			break;
		case RTE_PTYPE_L4_TCP:
			ptype_l4_tcp = 1;
			break;
		case RTE_PTYPE_L4_UDP:
			ptype_l4_udp = 1;
			break;
		}
	}

	if (ptype_l3_ipv4_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4_EXT\n", portid);
	if (ptype_l3_ipv6_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6_EXT\n", portid);
	if (!ptype_l3_ipv4_ext || !ptype_l3_ipv6_ext)
		return 0;

	if (ptype_l4_tcp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_TCP\n", portid);
	if (ptype_l4_udp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
	if (ptype_l4_tcp && ptype_l4_udp)
		return 1;

	return 0;
}
static void
em_parse_ptype(struct rte_mbuf *m)
{
	struct ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;
	void *l3;
	int hdr_len;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_type = eth_hdr->ether_type;
	l3 = (uint8_t *)eth_hdr + sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_hdr = (struct ipv4_hdr *)l3;
		hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
			  IPV4_IHL_MULTIPLIER;
		if (hdr_len == sizeof(struct ipv4_hdr)) {
			packet_type |= RTE_PTYPE_L3_IPV4;
			if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
				packet_type |= RTE_PTYPE_L4_TCP;
			else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
				packet_type |= RTE_PTYPE_L4_UDP;
		} else
			packet_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		ipv6_hdr = (struct ipv6_hdr *)l3;
		if (ipv6_hdr->proto == IPPROTO_TCP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		else if (ipv6_hdr->proto == IPPROTO_UDP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		else
			packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	}

	m->packet_type = packet_type;
}
uint16_t
em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
		  struct rte_mbuf *pkts[], uint16_t nb_pkts,
		  uint16_t max_pkts __rte_unused,
		  void *user_param __rte_unused)
{
	unsigned i;

	for (i = 0; i < nb_pkts; ++i)
		em_parse_ptype(pkts[i]);

	return nb_pkts;
}
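
/*
 * Usage sketch (illustrative; the real registration lives in the
 * application's init code, not in this file): when em_check_ptype()
 * reports that the NIC cannot classify these packet types in hardware,
 * the callback above can be attached to each RX queue so the ptypes are
 * filled in by software:
 *
 *	if (!em_check_ptype(portid) &&
 *	    rte_eth_add_rx_callback(portid, queueid,
 *			em_cb_parse_ptype, NULL) == NULL)
 *		rte_exit(EXIT_FAILURE, "Failed to add RX callback\n");
 */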
/* main processing loop */
int
em_main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;
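
	/*
	 * Added note (not in the upstream source): drain_tsc converts
	 * BURST_TX_DRAIN_US into TSC ticks, rounding the ticks-per-us
	 * term up. E.g. with a 2.5 GHz TSC and a 100 us drain interval,
	 * drain_tsc = ceil(2500000000 / 1000000) * 100 = 250000 ticks.
	 */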
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}
	while (!force_quit) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined(__SSE4_1__)
			l3fwd_em_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#else
			l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* __SSE4_1__ */
		}
	}

	return 0;
}
/*
 * Initialize exact match (hash) parameters.
 */
void
setup_hash(const int socketid)
{
	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv4_5tuple_host),
		.hash_func = ipv4_hash_crc,
		.hash_func_init_val = 0,
	};

	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv6_5tuple_host),
		.hash_func = ipv6_hash_crc,
		.hash_func_init_val = 0,
	};

	char s[64];

	/* create ipv4 hash */
	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	/* create ipv6 hash */
	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) {
		/* For testing hash matching with a large number of flows we
		 * generate millions of IP 5-tuples with an incremented dst
		 * address to initialize the hash table.
		 */

		/* populate the ipv4 hash */
		populate_ipv4_many_flow_into_table(
			ipv4_l3fwd_em_lookup_struct[socketid],
			hash_entry_number);

		/* populate the ipv6 hash */
		populate_ipv6_many_flow_into_table(
			ipv6_l3fwd_em_lookup_struct[socketid],
			hash_entry_number);
	} else {
		/*
		 * Use data from the ipv4/ipv6 l3fwd route arrays
		 * directly to initialize the hash table.
		 */

		/* populate the ipv4 hash */
		populate_ipv4_few_flow_into_table(
			ipv4_l3fwd_em_lookup_struct[socketid]);

		/* populate the ipv6 hash */
		populate_ipv6_few_flow_into_table(
			ipv6_l3fwd_em_lookup_struct[socketid]);
	}
}
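
/*
 * Usage sketch (an illustrative assumption about the call site, which
 * lives in main.c rather than this file): setup_hash() is expected to
 * run once per NUMA socket that has polling lcores, so each lcore does
 * its lookups against tables in socket-local memory:
 *
 *	socketid = rte_lcore_to_socket_id(lcore_id);
 *	if (ipv4_l3fwd_em_lookup_struct[socketid] == NULL)
 *		setup_hash(socketid);
 */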
/* Return ipv4/ipv6 em fwd lookup struct. */
void *
em_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_em_lookup_struct[socketid];
}

void *
em_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_em_lookup_struct[socketid];
}