/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_string_fns.h>
#define APP_LOOKUP_EXACT_MATCH 0
#define APP_LOOKUP_LPM 1
#define DO_RFC_1812_CHECKS

#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD APP_LOOKUP_LPM
#endif
/*
 * When set to zero, the simple forwarding path is enabled.
 * When set to one, the optimized forwarding path is enabled.
 * Note that the LPM optimized path uses SSE4.1 instructions.
 */
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
#define ENABLE_MULTI_BUFFER_OPTIMIZE 0
#else
#define ENABLE_MULTI_BUFFER_OPTIMIZE 1
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#include <rte_lpm6.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
		"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11], \
	addr[12], addr[13], addr[14], addr[15]
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

#define IPV6_ADDR_LEN 16

#define MEMPOOL_CACHE_SIZE 256

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/*
 * This expression is used to calculate the number of mbufs needed,
 * depending on user input, taking into account memory for the RX and TX
 * hardware rings, the per-lcore cache, and the mtable per port per lcore.
 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
 * value of 8192.
 */
#define NB_MBUF RTE_MAX( \
	(nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
	nb_ports*nb_lcores*MAX_PKT_BURST + \
	nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
	nb_lcores*MEMPOOL_CACHE_SIZE), \
	(unsigned)8192)
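
/*
 * Worked example (illustrative numbers, not the defaults of any particular
 * setup): with nb_ports = 2, nb_rx_queue = 1, n_tx_queue = 1 and
 * nb_lcores = 2, the first RTE_MAX argument is
 * 2*1*128 + 2*2*32 + 2*1*512 + 2*256 = 1920, so the 8192 floor is what
 * actually takes effect.
 */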
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
 */
#define MAX_TX_BURST (MAX_PKT_BURST / 2)

#define NB_SOCKETS 8

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
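
/*
 * With PREFETCH_OFFSET == 3 the RX loop further below prefetches the header
 * of packet j + 3 while packet j is being forwarded, giving each prefetch a
 * few packets worth of processing time to complete before the data is used.
 */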
/*
 * Used to mark destination port as 'invalid': packets whose destination
 * resolves to BAD_PORT are freed instead of being transmitted.
 */
#define BAD_PORT ((uint16_t)-1)

/* Number of packets handled per step in the multi-buffer paths. */
#define FWDSTEP 4
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* Ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

static __m128i val_eth[RTE_MAX_ETHPORTS];

/* mask used to replace the first 12 bytes of the Ethernet header. */
#define MASK_ETH 0x3f
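
/*
 * 0x3f = 0b00111111: used with _mm_blend_epi16() to take the six low 16-bit
 * words (12 bytes, i.e. the destination and source MAC addresses) from the
 * precomputed val_eth[] value, while the EtherType and the rest of the
 * packet are kept from the original frame.
 */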
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static int ipv6 = 0; /**< ipv6 is false by default. */
#endif
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif
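
/*
 * On CPUs with SSE4.2 the 5-tuple is hashed with the hardware CRC32
 * instruction; otherwise the software Jenkins hash (rte_jhash) is used
 * as a fallback.
 */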
struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv4_5tuple_host {
	struct {
		uint8_t  pad0;
		uint8_t  proto;
		uint16_t pad1;
		uint32_t ip_src;
		uint32_t ip_dst;
		uint16_t port_src;
		uint16_t port_dst;
	};
	__m128i xmm;
};

#define XMM_NUM_IN_IPV6_5TUPLE 3

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv6_5tuple_host {
	struct {
		uint16_t pad0;
		uint8_t  proto;
		uint8_t  pad1;
		uint8_t  ip_src[IPV6_ADDR_LEN];
		uint8_t  ip_dst[IPV6_ADDR_LEN];
		uint16_t port_src;
		uint16_t port_dst;
		uint64_t reserve;
	};
	__m128i xmm[XMM_NUM_IN_IPV6_5TUPLE];
};
struct ipv4_l3fwd_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};
static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{{IPv4(101,0,0,0), IPv4(100,10,0,1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(201,0,0,0), IPv4(200,20,0,1), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(111,0,0,0), IPv4(100,30,0,1), 101, 11, IPPROTO_TCP}, 2},
	{{IPv4(211,0,0,0), IPv4(200,40,0,1), 102, 12, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{{
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 0},

	{{
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 1},

	{{
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 2},

	{{
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 3},
};
typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#ifdef RTE_ARCH_X86_64
/* default to 4 million hash entries (approx) */
#define L3FWD_HASH_ENTRIES (1024*1024*4)
#else
/* 32-bit has less address-space for hugepage memory, limit to 1M entries */
#define L3FWD_HASH_ENTRIES (1024*1024*1)
#endif
#define HASH_ENTRY_NUMBER_DEFAULT 4

static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
	uint32_t init_val)
{
	const union ipv4_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash_1word(k->ip_src, init_val);
	init_val = rte_jhash_1word(k->ip_dst, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}

static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val)
{
	const union ipv6_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
	ip_src0 = (const uint32_t *) k->ip_src;
	ip_src1 = (const uint32_t *)(k->ip_src + 4);
	ip_src2 = (const uint32_t *)(k->ip_src + 8);
	ip_src3 = (const uint32_t *)(k->ip_src + 12);
	ip_dst0 = (const uint32_t *) k->ip_dst;
	ip_dst1 = (const uint32_t *)(k->ip_dst + 4);
	ip_dst2 = (const uint32_t *)(k->ip_dst + 8);
	ip_dst3 = (const uint32_t *)(k->ip_dst + 12);
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(*ip_src0, init_val);
	init_val = rte_hash_crc_4byte(*ip_src1, init_val);
	init_val = rte_hash_crc_4byte(*ip_src2, init_val);
	init_val = rte_hash_crc_4byte(*ip_src3, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */

	return init_val;
}
#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

#endif /* APP_LOOKUP_EXACT_MATCH */
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct ipv4_l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

struct ipv6_l3fwd_route {
	uint8_t ip[16];
	uint8_t depth;
	uint8_t if_out;
};
static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{IPv4(1,1,1,0), 24, 0},
	{IPv4(2,1,1,0), 24, 1},
	{IPv4(3,1,1,0), 24, 2},
	{IPv4(4,1,1,0), 24, 3},
	{IPv4(5,1,1,0), 24, 4},
	{IPv4(6,1,1,0), 24, 5},
	{IPv4(7,1,1,0), 24, 6},
	{IPv4(8,1,1,0), 24, 7},
};
static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)
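
/*
 * Note: rte_lpm6 resolves the first 24 bits of an address through its main
 * table and each further 8 bits through a tbl8 group, so the number of
 * tbl8s above bounds how many routes deeper than /24 the table can hold.
 */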
typedef struct rte_lpm lookup_struct_t;
typedef struct rte_lpm6 lookup6_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup6_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#endif
struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
	lookup_struct_t *ipv4_lookup_struct;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
	lookup6_struct_t *ipv6_lookup_struct;
#else
	lookup_struct_t *ipv6_lookup_struct;
#endif
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];

/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
static inline __attribute__((always_inline)) void
send_packetsx4(struct lcore_conf *qconf, uint8_t port,
	struct rte_mbuf *m[], uint32_t num)
{
	uint32_t len, j, n;

	len = qconf->tx_mbufs[port].len;

	/*
	 * If TX buffer for that queue is empty, and we have enough packets,
	 * then send them straightway.
	 */
	if (num >= MAX_TX_BURST && len == 0) {
		n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
		if (unlikely(n < num)) {
			do {
				rte_pktmbuf_free(m[n]);
			} while (++n < num);
		}
		return;
	}

	/*
	 * Put packets into TX buffer for that queue.
	 */

	n = len + num;
	n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;

	/*
	 * Duff's-device-style copy: the switch jumps into the unrolled loop
	 * at the case matching n % FWDSTEP, so the remainder is copied first
	 * and the rest in full groups of FWDSTEP.
	 */
	j = 0;
	switch (n % FWDSTEP) {
	while (j < n) {
	case 0:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	case 3:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	case 2:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	case 1:
		qconf->tx_mbufs[port].m_table[len + j] = m[j];
		j++;
	}
	}

	len += n;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {

		send_burst(qconf, MAX_PKT_BURST, port);

		/* copy rest of the packets into the TX buffer. */
		len = num - n;

		j = 0;
		switch (len % FWDSTEP) {
		while (j < len) {
		case 0:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		case 3:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		case 2:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		case 1:
			qconf->tx_mbufs[port].m_table[j] = m[n + j];
			j++;
		}
		}
	}

	qconf->tx_mbufs[port].len = len;
}
#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/*
	 * 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/*
	 * 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or
	 * ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;
	/*
	 * 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/*
	 * 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
		return -5;

	return 0;
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

static __m128i mask0;
static __m128i mask1;
static __m128i mask2;
static inline uint8_t
get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	int ret = 0;
	union ipv4_5tuple_host key;

	ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
	__m128i data = _mm_loadu_si128((__m128i *)(ipv4_hdr));
	/* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
	key.xmm = _mm_and_si128(data, mask0);
	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
static inline uint8_t
get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
	int ret = 0;
	union ipv6_5tuple_host key;

	ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
	__m128i data0 = _mm_loadu_si128((__m128i *)(ipv6_hdr));
	__m128i data1 = _mm_loadu_si128((__m128i *)(((uint8_t *)ipv6_hdr) + sizeof(__m128i)));
	__m128i data2 = _mm_loadu_si128((__m128i *)(((uint8_t *)ipv6_hdr) + sizeof(__m128i) + sizeof(__m128i)));
	/* Get part of 5 tuple: src IP address lower 96 bits and protocol */
	key.xmm[0] = _mm_and_si128(data0, mask1);
	/* Get part of 5 tuple: dst IP address lower 96 bits and src IP address higher 32 bits */
	key.xmm[1] = data1;
	/* Get part of 5 tuple: dst port and src port and dst IP address higher 32 bits */
	key.xmm[2] = _mm_and_si128(data2, mask2);

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)

static inline uint8_t
get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
		rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
		&next_hop) == 0) ? next_hop : portid);
}

static inline uint8_t
get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t)((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
		((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
		next_hop : portid);
}
#endif
static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
	struct lcore_conf *qconf) __attribute__((unused));
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
	(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))

#define MASK_ALL_PKTS   0xf
#define EXCLUDE_1ST_PKT 0xe
#define EXCLUDE_2ND_PKT 0xd
#define EXCLUDE_3RD_PKT 0xb
#define EXCLUDE_4TH_PKT 0x7
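
/*
 * valid_mask bookkeeping: bit i corresponds to packet i in a group of four.
 * Starting from MASK_ALL_PKTS, ANDing with one of the EXCLUDE_* constants
 * clears the bit of a packet that failed the RFC1812 checks, so only the
 * surviving packets are forwarded.
 */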
static inline void
simple_ipv4_fwd_4pkts(struct rte_mbuf *m[4], uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr[4];
	struct ipv4_hdr *ipv4_hdr[4];
	void *d_addr_bytes[4];
	uint16_t dst_port[4];
	int32_t ret[4];
	union ipv4_5tuple_host key[4];
	__m128i data[4];

	eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
	eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
	eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
	eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);

	/* Handle IPv4 headers. */
	ipv4_hdr[0] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv4_hdr[1] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv4_hdr[2] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv4_hdr[3] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
		sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
	/* Check to make sure the packet is valid (RFC1812) */
	uint8_t valid_mask = MASK_ALL_PKTS;

	if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
		rte_pktmbuf_free(m[0]);
		valid_mask &= EXCLUDE_1ST_PKT;
	}
	if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
		rte_pktmbuf_free(m[1]);
		valid_mask &= EXCLUDE_2ND_PKT;
	}
	if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
		rte_pktmbuf_free(m[2]);
		valid_mask &= EXCLUDE_3RD_PKT;
	}
	if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
		rte_pktmbuf_free(m[3]);
		valid_mask &= EXCLUDE_4TH_PKT;
	}
	if (unlikely(valid_mask != MASK_ALL_PKTS)) {
		if (valid_mask == 0) {
			return;
		} else {
			uint8_t i = 0;

			for (i = 0; i < 4; i++) {
				if ((0x1 << i) & valid_mask) {
					l3fwd_simple_forward(m[i], portid, qconf);
				}
			}
			return;
		}
	}
#endif /* End of #ifdef DO_RFC_1812_CHECKS */

	data[0] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
	data[1] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
	data[2] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
	data[3] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
		sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));

	key[0].xmm = _mm_and_si128(data[0], mask0);
	key[1].xmm = _mm_and_si128(data[1], mask0);
	key[2].xmm = _mm_and_si128(data[2], mask0);
	key[3].xmm = _mm_and_si128(data[3], mask0);

	const void *key_array[4] = {&key[0], &key[1], &key[2], &key[3]};
	rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 4, ret);
	dst_port[0] = (uint8_t)((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
	dst_port[1] = (uint8_t)((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
	dst_port[2] = (uint8_t)((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
	dst_port[3] = (uint8_t)((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);

	if (dst_port[0] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[0]) == 0)
		dst_port[0] = portid;
	if (dst_port[1] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[1]) == 0)
		dst_port[1] = portid;
	if (dst_port[2] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[2]) == 0)
		dst_port[2] = portid;
	if (dst_port[3] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[3]) == 0)
		dst_port[3] = portid;

	/* 02:00:00:00:00:xx */
	d_addr_bytes[0] = &eth_hdr[0]->d_addr.addr_bytes[0];
	d_addr_bytes[1] = &eth_hdr[1]->d_addr.addr_bytes[0];
	d_addr_bytes[2] = &eth_hdr[2]->d_addr.addr_bytes[0];
	d_addr_bytes[3] = &eth_hdr[3]->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes[0]) = 0x000000000002 + ((uint64_t)dst_port[0] << 40);
	*((uint64_t *)d_addr_bytes[1]) = 0x000000000002 + ((uint64_t)dst_port[1] << 40);
	*((uint64_t *)d_addr_bytes[2]) = 0x000000000002 + ((uint64_t)dst_port[2] << 40);
	*((uint64_t *)d_addr_bytes[3]) = 0x000000000002 + ((uint64_t)dst_port[3] << 40);

#ifdef DO_RFC_1812_CHECKS
	/* Update time to live and header checksum */
	--(ipv4_hdr[0]->time_to_live);
	--(ipv4_hdr[1]->time_to_live);
	--(ipv4_hdr[2]->time_to_live);
	--(ipv4_hdr[3]->time_to_live);
	++(ipv4_hdr[0]->hdr_checksum);
	++(ipv4_hdr[1]->hdr_checksum);
	++(ipv4_hdr[2]->hdr_checksum);
	++(ipv4_hdr[3]->hdr_checksum);
#endif /* DO_RFC_1812_CHECKS */

	/* src addr */
	ether_addr_copy(&ports_eth_addr[dst_port[0]], &eth_hdr[0]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[1]], &eth_hdr[1]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[2]], &eth_hdr[2]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[3]], &eth_hdr[3]->s_addr);

	send_single_packet(m[0], (uint8_t)dst_port[0]);
	send_single_packet(m[1], (uint8_t)dst_port[1]);
	send_single_packet(m[2], (uint8_t)dst_port[2]);
	send_single_packet(m[3], (uint8_t)dst_port[3]);
}
static inline void get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0, __m128i mask1,
	union ipv6_5tuple_host *key)
{
	__m128i tmpdata0 = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m0, unsigned char *)
		+ sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
	__m128i tmpdata1 = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m0, unsigned char *)
		+ sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)
		+ sizeof(__m128i)));
	__m128i tmpdata2 = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m0, unsigned char *)
		+ sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)
		+ sizeof(__m128i) + sizeof(__m128i)));
	key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
	key->xmm[1] = tmpdata1;
	key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
}
static inline void
simple_ipv6_fwd_4pkts(struct rte_mbuf *m[4], uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr[4];
	__attribute__((unused)) struct ipv6_hdr *ipv6_hdr[4];
	void *d_addr_bytes[4];
	uint16_t dst_port[4];
	int32_t ret[4];
	union ipv6_5tuple_host key[4];

	eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
	eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
	eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
	eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);

	/* Handle IPv6 headers. */
	ipv6_hdr[0] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv6_hdr[1] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv6_hdr[2] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
		sizeof(struct ether_hdr));
	ipv6_hdr[3] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
		sizeof(struct ether_hdr));

	get_ipv6_5tuple(m[0], mask1, mask2, &key[0]);
	get_ipv6_5tuple(m[1], mask1, mask2, &key[1]);
	get_ipv6_5tuple(m[2], mask1, mask2, &key[2]);
	get_ipv6_5tuple(m[3], mask1, mask2, &key[3]);

	const void *key_array[4] = {&key[0], &key[1], &key[2], &key[3]};
	rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 4, ret);
	dst_port[0] = (uint8_t)((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
	dst_port[1] = (uint8_t)((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
	dst_port[2] = (uint8_t)((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
	dst_port[3] = (uint8_t)((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);

	if (dst_port[0] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[0]) == 0)
		dst_port[0] = portid;
	if (dst_port[1] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[1]) == 0)
		dst_port[1] = portid;
	if (dst_port[2] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[2]) == 0)
		dst_port[2] = portid;
	if (dst_port[3] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[3]) == 0)
		dst_port[3] = portid;

	/* 02:00:00:00:00:xx */
	d_addr_bytes[0] = &eth_hdr[0]->d_addr.addr_bytes[0];
	d_addr_bytes[1] = &eth_hdr[1]->d_addr.addr_bytes[0];
	d_addr_bytes[2] = &eth_hdr[2]->d_addr.addr_bytes[0];
	d_addr_bytes[3] = &eth_hdr[3]->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes[0]) = 0x000000000002 + ((uint64_t)dst_port[0] << 40);
	*((uint64_t *)d_addr_bytes[1]) = 0x000000000002 + ((uint64_t)dst_port[1] << 40);
	*((uint64_t *)d_addr_bytes[2]) = 0x000000000002 + ((uint64_t)dst_port[2] << 40);
	*((uint64_t *)d_addr_bytes[3]) = 0x000000000002 + ((uint64_t)dst_port[3] << 40);

	/* src addr */
	ether_addr_copy(&ports_eth_addr[dst_port[0]], &eth_hdr[0]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[1]], &eth_hdr[1]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[2]], &eth_hdr[2]->s_addr);
	ether_addr_copy(&ports_eth_addr[dst_port[3]], &eth_hdr[3]->s_addr);

	send_single_packet(m[0], (uint8_t)dst_port[0]);
	send_single_packet(m[1], (uint8_t)dst_port[1]);
	send_single_packet(m[2], (uint8_t)dst_port[2]);
	send_single_packet(m[3], (uint8_t)dst_port[3]);
}
#endif /* APP_LOOKUP_METHOD */
static inline __attribute__((always_inline)) void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	void *d_addr_bytes;
	uint8_t dst_port;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (m->ol_flags & PKT_RX_IPV4_HDR) {
		/* Handle IPv4 headers. */
		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
			sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
		/* Check to make sure the packet is valid (RFC1812) */
		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
			rte_pktmbuf_free(m);
			return;
		}
#endif

		dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
			qconf->ipv4_lookup_struct);
		if (dst_port >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = ETHER_LOCAL_ADMIN_ADDR +
			((uint64_t)dst_port << 40);

#ifdef DO_RFC_1812_CHECKS
		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
#endif

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);

	} else {
		/* Handle IPv6 headers. */
		struct ipv6_hdr *ipv6_hdr;

		ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
			sizeof(struct ether_hdr));

		dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct);

		if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = ETHER_LOCAL_ADMIN_ADDR +
			((uint64_t)dst_port << 40);

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
	}
}
#ifdef DO_RFC_1812_CHECKS

#define IPV4_MIN_VER_IHL 0x45
#define IPV4_MAX_VER_IHL 0x4f
#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)

/* Minimum value of IPV4 total length (20B) in network byte order. */
#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
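
/*
 * sizeof(struct ipv4_hdr) << 8 == 20 << 8 == 0x1400, which is the value the
 * 16-bit total_length field holds for a 20-byte datagram when read as a
 * host-order integer on a little-endian CPU (the field itself is big-endian
 * on the wire).
 */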
/*
 * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
 * - The IP version number must be 4.
 * - The IP header length field must be large enough to hold the
 *   minimum length legal IP datagram (20 bytes = 5 words).
 * - The IP total length field must be large enough to hold the IP
 *   datagram header, whose length is specified in the IP header length
 *   field.
 * If we encounter an invalid IPV4 packet, then set its destination port
 * to the BAD_PORT value.
 */
static inline __attribute__((always_inline)) void
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t flags)
{
	uint8_t ihl;

	if ((flags & PKT_RX_IPV4_HDR) != 0) {

		ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;

		ipv4_hdr->time_to_live--;
		ipv4_hdr->hdr_checksum++;
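
		/*
		 * Incremental update: TTL is the high-order byte of a 16-bit
		 * big-endian header word, so decrementing it lowers the
		 * checksummed sum by 0x100 in network order; incrementing the
		 * checksum field as a host-order integer on a little-endian
		 * CPU adds 0x100 in network order, which compensates (an
		 * RFC 1141-style shortcut that ignores the one's-complement
		 * wrap-around corner case).
		 */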
		if (ihl > IPV4_MAX_VER_IHL_DIFF ||
				((uint8_t)ipv4_hdr->total_length == 0 &&
				ipv4_hdr->total_length < IPV4_MIN_LEN_BE)) {
			dp[0] = BAD_PORT;
		}
	}
}
#else
#define rfc1812_process(ipv4_hdr, dp, flags) do { } while (0)
#endif /* DO_RFC_1812_CHECKS */
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
	(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))

static inline __attribute__((always_inline)) uint16_t
get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint32_t dst_ipv4, uint8_t portid)
{
	uint8_t next_hop;
	struct ipv6_hdr *ipv6_hdr;
	struct ether_hdr *eth_hdr;

	if (pkt->ol_flags & PKT_RX_IPV4_HDR) {
		if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
				&next_hop) != 0)
			next_hop = portid;
	} else if (pkt->ol_flags & PKT_RX_IPV6_HDR) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
		ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
		if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) != 0)
			next_hop = portid;
	} else {
		next_hop = portid;
	}

	return next_hop;
}
static inline void
process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
	uint16_t *dst_port, uint8_t portid)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	uint32_t dst_ipv4;
	uint16_t dp;
	__m128i te, ve;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

	dst_ipv4 = ipv4_hdr->dst_addr;
	dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
	dp = get_dst_port(qconf, pkt, dst_ipv4, portid);

	te = _mm_load_si128((__m128i *)eth_hdr);
	ve = val_eth[dp];

	dst_port[0] = dp;
	rfc1812_process(ipv4_hdr, dst_port, pkt->ol_flags);

	te = _mm_blend_epi16(te, ve, MASK_ETH);
	_mm_store_si128((__m128i *)eth_hdr, te);
}
/*
 * Read ol_flags and destination IPV4 addresses from 4 mbufs.
 */
static inline void
processx4_step1(struct rte_mbuf *pkt[FWDSTEP], __m128i *dip, uint32_t *flag)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ether_hdr *eth_hdr;
	uint32_t x0, x1, x2, x3;

	eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x0 = ipv4_hdr->dst_addr;
	flag[0] = pkt[0]->ol_flags & PKT_RX_IPV4_HDR;

	eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x1 = ipv4_hdr->dst_addr;
	flag[0] &= pkt[1]->ol_flags;

	eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x2 = ipv4_hdr->dst_addr;
	flag[0] &= pkt[2]->ol_flags;

	eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	x3 = ipv4_hdr->dst_addr;
	flag[0] &= pkt[3]->ol_flags;

	dip[0] = _mm_set_epi32(x3, x2, x1, x0);
}
/*
 * Lookup into LPM for destination port.
 * If lookup fails, use incoming port (portid) as destination port.
 */
static inline void
processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
	uint8_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP])
{
	rte_xmm_t dst;
	const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
						4, 5, 6, 7, 0, 1, 2, 3);

	/* Byte swap 4 IPV4 addresses. */
	dip = _mm_shuffle_epi8(dip, bswap_mask);

	/* if all 4 packets are IPV4. */
	if (likely(flag != 0)) {
		rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
	} else {
		dst.x = dip;
		dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
		dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
		dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
		dprt[3] = get_dst_port(qconf, pkt[3], dst.u32[3], portid);
	}
}
/*
 * Update source and destination MAC addresses in the ethernet header.
 * Perform RFC1812 checks and updates for IPV4 packets.
 */
static inline void
processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
{
	__m128i te[FWDSTEP];
	__m128i ve[FWDSTEP];
	__m128i *p[FWDSTEP];

	p[0] = (rte_pktmbuf_mtod(pkt[0], __m128i *));
	p[1] = (rte_pktmbuf_mtod(pkt[1], __m128i *));
	p[2] = (rte_pktmbuf_mtod(pkt[2], __m128i *));
	p[3] = (rte_pktmbuf_mtod(pkt[3], __m128i *));

	ve[0] = val_eth[dst_port[0]];
	te[0] = _mm_load_si128(p[0]);

	ve[1] = val_eth[dst_port[1]];
	te[1] = _mm_load_si128(p[1]);

	ve[2] = val_eth[dst_port[2]];
	te[2] = _mm_load_si128(p[2]);

	ve[3] = val_eth[dst_port[3]];
	te[3] = _mm_load_si128(p[3]);

	/* Update first 12 bytes, keep rest bytes intact. */
	te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
	te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
	te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
	te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);

	_mm_store_si128(p[0], te[0]);
	_mm_store_si128(p[1], te[1]);
	_mm_store_si128(p[2], te[2]);
	_mm_store_si128(p[3], te[3]);

	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
		&dst_port[0], pkt[0]->ol_flags);
	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
		&dst_port[1], pkt[1]->ol_flags);
	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
		&dst_port[2], pkt[2]->ol_flags);
	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
		&dst_port[3], pkt[3]->ol_flags);
}
/*
 * We group consecutive packets with the same destination port into one burst.
 * To avoid extra latency this is done together with some other packet
 * processing, but after we made a final decision about packet's destination.
 * To do this we maintain:
 *
 * pnum - array of number of consecutive packets with the same dest port for
 * each packet in the input burst.
 *
 * lp - pointer to the last updated element in the pnum.
 *
 * dlp - dest port value lp corresponds to.
 */

#define GRPSZ (1 << FWDSTEP)
#define GRPMSK (GRPSZ - 1)
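
/*
 * GRPSZ == 1 << FWDSTEP == 16: one gptbl[] entry per possible 4-bit
 * equality mask produced by comparing four adjacent destination ports;
 * GRPMSK == 0xf masks that comparison result.
 */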
#define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
	if (likely((dlp) == (dcp)[(idx)])) {        \
		(lp)[0]++;                          \
	} else {                                    \
		(dlp) = (dcp)[idx];                 \
		(lp) = (pn) + (idx);                \
		(lp)[0] = 1;                        \
	}                                           \
} while (0)
/*
 * Group consecutive packets with the same destination port in bursts of 4.
 * Suppose we have an array of destination ports:
 * dst_port[] = {a, b, c, d, e, ... }
 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
 * We do four comparisons at once and the result is a 4-bit mask.
 * This mask is used as an index into the prebuilt array of pnum values.
 */
static inline uint16_t *
port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
{
	static const struct {
		uint64_t pnum; /* prebuilt 4 values for pnum[]. */
		int32_t  idx;  /* index for new last updated element. */
		uint16_t lpv;  /* add value to the last updated element. */
	} gptbl[GRPSZ] = {
	{
		/* 0: a != b, b != c, c != d, d != e */
		.pnum = UINT64_C(0x0001000100010001),
		.idx = 4,
		.lpv = 0,
	},
	{
		/* 1: a == b, b != c, c != d, d != e */
		.pnum = UINT64_C(0x0001000100010002),
		.idx = 4,
		.lpv = 1,
	},
	{
		/* 2: a != b, b == c, c != d, d != e */
		.pnum = UINT64_C(0x0001000100020001),
		.idx = 4,
		.lpv = 0,
	},
	{
		/* 3: a == b, b == c, c != d, d != e */
		.pnum = UINT64_C(0x0001000100020003),
		.idx = 4,
		.lpv = 2,
	},
	{
		/* 4: a != b, b != c, c == d, d != e */
		.pnum = UINT64_C(0x0001000200010001),
		.idx = 4,
		.lpv = 0,
	},
	{
		/* 5: a == b, b != c, c == d, d != e */
		.pnum = UINT64_C(0x0001000200010002),
		.idx = 4,
		.lpv = 1,
	},
	{
		/* 6: a != b, b == c, c == d, d != e */
		.pnum = UINT64_C(0x0001000200030001),
		.idx = 4,
		.lpv = 0,
	},
	{
		/* 7: a == b, b == c, c == d, d != e */
		.pnum = UINT64_C(0x0001000200030004),
		.idx = 4,
		.lpv = 3,
	},
	{
		/* 8: a != b, b != c, c != d, d == e */
		.pnum = UINT64_C(0x0002000100010001),
		.idx = 3,
		.lpv = 0,
	},
	{
		/* 9: a == b, b != c, c != d, d == e */
		.pnum = UINT64_C(0x0002000100010002),
		.idx = 3,
		.lpv = 1,
	},
	{
		/* 0xa: a != b, b == c, c != d, d == e */
		.pnum = UINT64_C(0x0002000100020001),
		.idx = 3,
		.lpv = 0,
	},
	{
		/* 0xb: a == b, b == c, c != d, d == e */
		.pnum = UINT64_C(0x0002000100020003),
		.idx = 3,
		.lpv = 2,
	},
	{
		/* 0xc: a != b, b != c, c == d, d == e */
		.pnum = UINT64_C(0x0002000300010001),
		.idx = 2,
		.lpv = 0,
	},
	{
		/* 0xd: a == b, b != c, c == d, d == e */
		.pnum = UINT64_C(0x0002000300010002),
		.idx = 2,
		.lpv = 1,
	},
	{
		/* 0xe: a != b, b == c, c == d, d == e */
		.pnum = UINT64_C(0x0002000300040001),
		.idx = 1,
		.lpv = 0,
	},
	{
		/* 0xf: a == b, b == c, c == d, d == e */
		.pnum = UINT64_C(0x0002000300040005),
		.idx = 0,
		.lpv = 4,
	},
	};

	union {
		uint16_t u16[FWDSTEP + 1];
		uint64_t u64;
	} *pnum = (void *)pn;

	int32_t v;

	dp1 = _mm_cmpeq_epi16(dp1, dp2);
	dp1 = _mm_unpacklo_epi16(dp1, dp1);
	v = _mm_movemask_ps((__m128)dp1);

	/* update last port counter. */
	lp[0] += gptbl[v].lpv;

	/* if dest port value has changed. */
	if (v != GRPMSK) {
		lp = pnum->u16 + gptbl[v].idx;
		lp[0] = 1;
		pnum->u64 = gptbl[v].pnum;
	}

	return lp;
}

#endif /* APP_LOOKUP_METHOD */
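
/*
 * Illustrative walk-through of port_groupx4() (example values invented for
 * this comment): with dst_port[] = {1, 1, 2, 2} followed by 3, dp1 =
 * <1, 1, 2, 2> and dp2 = <1, 2, 2, 3> compare equal only in lanes 0 and 2,
 * so v = 0x5; gptbl[5].pnum = 0x0001000200010002 then stores pnum[0] = 2
 * and pnum[2] = 2, i.e. two groups of two consecutive packets per port.
 */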
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, j, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
	(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
	int32_t k;
	uint16_t dlp;
	uint16_t *lp;
	uint16_t dst_port[MAX_PKT_BURST];
	__m128i dip[MAX_PKT_BURST / FWDSTEP];
	uint32_t flag[MAX_PKT_BURST / FWDSTEP];
	uint16_t pnum[MAX_PKT_BURST + 1];
#endif

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if (ENABLE_MULTI_BUFFER_OPTIMIZE == 1)
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
			{
				/*
				 * Send nb_rx - nb_rx%4 packets
				 * in groups of 4.
				 */
				int32_t n = RTE_ALIGN_FLOOR(nb_rx, 4);

				for (j = 0; j < n; j += 4) {
					uint32_t ol_flag = pkts_burst[j]->ol_flags
							& pkts_burst[j+1]->ol_flags
							& pkts_burst[j+2]->ol_flags
							& pkts_burst[j+3]->ol_flags;
					if (ol_flag & PKT_RX_IPV4_HDR) {
						simple_ipv4_fwd_4pkts(&pkts_burst[j],
							portid, qconf);
					} else if (ol_flag & PKT_RX_IPV6_HDR) {
						simple_ipv6_fwd_4pkts(&pkts_burst[j],
							portid, qconf);
					} else {
						l3fwd_simple_forward(pkts_burst[j],
							portid, qconf);
						l3fwd_simple_forward(pkts_burst[j+1],
							portid, qconf);
						l3fwd_simple_forward(pkts_burst[j+2],
							portid, qconf);
						l3fwd_simple_forward(pkts_burst[j+3],
							portid, qconf);
					}
				}
				for (; j < nb_rx; j++) {
					l3fwd_simple_forward(pkts_burst[j],
						portid, qconf);
				}
			}
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
			{
				k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
				for (j = 0; j != k; j += FWDSTEP) {
					processx4_step1(&pkts_burst[j],
						&dip[j / FWDSTEP],
						&flag[j / FWDSTEP]);
				}

				k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
				for (j = 0; j != k; j += FWDSTEP) {
					processx4_step2(qconf, dip[j / FWDSTEP],
						flag[j / FWDSTEP], portid,
						&pkts_burst[j], &dst_port[j]);
				}

				/*
				 * Finish packet processing and group consecutive
				 * packets with the same destination port.
				 */
				k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
				if (k != 0) {
					__m128i dp1, dp2;

					lp = pnum;
					lp[0] = 1;

					processx4_step3(pkts_burst, dst_port);

					/* dp1: <d[0], d[1], d[2], d[3], ... > */
					dp1 = _mm_loadu_si128((__m128i *)dst_port);

					for (j = FWDSTEP; j != k; j += FWDSTEP) {
						processx4_step3(&pkts_burst[j],
							&dst_port[j]);

						/*
						 * dp2:
						 * <d[j-3], d[j-2], d[j-1], d[j], ... >
						 */
						dp2 = _mm_loadu_si128((__m128i *)
							&dst_port[j - FWDSTEP + 1]);
						lp = port_groupx4(&pnum[j - FWDSTEP],
							lp, dp1, dp2);

						/*
						 * dp1:
						 * <d[j], d[j+1], d[j+2], d[j+3], ... >
						 */
						dp1 = _mm_srli_si128(dp2,
							(FWDSTEP - 1) *
							sizeof(dst_port[0]));
					}

					/*
					 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
					 */
					dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
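					/*
					 * 0xf9 encodes the word indices 1, 2, 3, 3
					 * (two bits per lane, lowest field first):
					 * the last destination port is repeated so
					 * the final comparison sees an equal pair
					 * in the padding lane.
					 */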
					lp = port_groupx4(&pnum[j - FWDSTEP], lp,
						dp1, dp2);

					/*
					 * remove values added by the last repeated
					 * dst port.
					 */
					lp[0]--;
					dlp = dst_port[j - 1];
				} else {
					/* set dlp and lp to the never used values. */
					dlp = BAD_PORT - 1;
					lp = pnum + MAX_PKT_BURST;
				}

				/* Process up to last 3 packets one by one. */
				switch (nb_rx % FWDSTEP) {
				case 3:
					process_packet(qconf, pkts_burst[j],
						dst_port + j, portid);
					GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
					j++;
				case 2:
					process_packet(qconf, pkts_burst[j],
						dst_port + j, portid);
					GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
					j++;
				case 1:
					process_packet(qconf, pkts_burst[j],
						dst_port + j, portid);
					GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
					j++;
				}

				/*
				 * Send packets out, through destination port.
				 * Consecutive packets with the same destination port
				 * are already grouped together.
				 * If destination port for the packet equals BAD_PORT,
				 * then free the packet without sending it out.
				 */
				for (j = 0; j < nb_rx; j += k) {

					int32_t m;
					uint16_t pn;

					pn = dst_port[j];
					k = pnum[j];

					if (likely(pn != BAD_PORT)) {
						send_packetsx4(qconf, pn,
							pkts_burst + j, k);
					} else {
						for (m = j; m != j + k; m++)
							rte_pktmbuf_free(pkts_burst[m]);
					}
				}
			}
#endif /* APP_LOOKUP_METHOD */
#else /* ENABLE_MULTI_BUFFER_OPTIMIZE == 0 */

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
					pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
					j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid,
					qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], portid,
					qconf);
			}
#endif /* ENABLE_MULTI_BUFFER_OPTIMIZE */
		}
	}
}
static int
check_lcore_params(void)
{
	uint8_t queue, lcore;
	uint16_t i;
	int socketid;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("invalid queue number: %hhu\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
			return -1;
		}
		if ((socketid = rte_lcore_to_socket_id(lcore)) != 0 &&
			(numa_on == 0)) {
			printf("warning: lcore %hhu is on socket %d with numa off\n",
				lcore, socketid);
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
/* Return the number of RX queues configured for a port (max queue id + 1). */
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		} else {
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
				lcore_params[i].port_id;
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
				lcore_params[i].queue_id;
			lcore_conf[lcore].n_rx_queue++;
		}
	}
	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P"
		" [--config (port,queue,lcore)[,(port,queue,lcore)]]"
		" [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -P: enable promiscuous mode\n"
		"  --config (port,queue,lcore): rx queues configuration\n"
		"  --no-numa: optional, disable numa awareness\n"
		"  --ipv6: optional, specify it if running ipv6 packets\n"
		"  --enable-jumbo: enable jumbo frames; the maximum packet "
		"length is PKTLEN, in decimal (64-9600)\n"
		"  --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n",
		prgname);
}
static int parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static int
parse_hash_entry_number(const char *hash_entry_num)
{
	char *end = NULL;
	unsigned long hash_en;

	/* parse hexadecimal string */
	hash_en = strtoul(hash_entry_num, &end, 16);
	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (hash_en == 0)
		return -1;

	return hash_en;
}
#endif
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		if ((p0 = strchr(p, ')')) == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{CMD_LINE_OPT_CONFIG, 1, 0, 0},
		{CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
		{CMD_LINE_OPT_IPV6, 0, 0, 0},
		{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
		{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:P",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;

		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_CONFIG,
				sizeof(CMD_LINE_OPT_CONFIG))) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}

			if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
				sizeof(CMD_LINE_OPT_NO_NUMA))) {
				printf("numa is disabled\n");
				numa_on = 0;
			}

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
			if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_IPV6,
				sizeof(CMD_LINE_OPT_IPV6))) {
				printf("ipv6 is specified\n");
				ipv6 = 1;
			}
#endif

			if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
				sizeof(CMD_LINE_OPT_ENABLE_JUMBO))) {
				struct option lenopts = {"max-pkt-len", required_argument, 0, 0};

				printf("jumbo frame is enabled - disabling simple TX path\n");
				port_conf.rxmode.jumbo_frame = 1;

				/* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
				if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
					ret = parse_max_pkt_len(optarg);
					if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)) {
						printf("invalid packet length\n");
						print_usage(prgname);
						return -1;
					}
					port_conf.rxmode.max_rx_pkt_len = ret;
				}
				printf("set jumbo frame max packet length to %u\n",
					(unsigned int)port_conf.rxmode.max_rx_pkt_len);
			}

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
			if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_HASH_ENTRY_NUM,
				sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
				ret = parse_hash_entry_number(optarg);
				if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
					hash_entry_number = ret;
				} else {
					printf("invalid hash entry number\n");
					print_usage(prgname);
					return -1;
				}
			}
#endif
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

static void convert_ipv4_5tuple(struct ipv4_5tuple *key1,
		union ipv4_5tuple_host *key2)
{
	key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
	key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
}
static void convert_ipv6_5tuple(struct ipv6_5tuple *key1,
		union ipv6_5tuple_host *key2)
{
	uint32_t i;

	for (i = 0; i < 16; i++) {
		key2->ip_dst[i] = key1->ip_dst[i];
		key2->ip_src[i] = key1->ip_src[i];
	}
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
	key2->reserve = 0;
}
#define BYTE_VALUE_MAX 256
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00
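
/*
 * mask0 below is built as _mm_set_epi32(ALL_32_BITS, ALL_32_BITS,
 * ALL_32_BITS, BIT_8_TO_15): in the 16 bytes loaded from an IPv4 header
 * starting at time_to_live, only the protocol byte (bits 8-15 of the first
 * dword) is kept, zeroing TTL and the header checksum before the key is
 * hashed or compared.
 */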
static inline void
populate_ipv4_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;
	uint32_t array_len = sizeof(ipv4_l3fwd_route_array)/sizeof(ipv4_l3fwd_route_array[0]);

	mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
	for (i = 0; i < array_len; i++) {
		struct ipv4_l3fwd_route entry;
		union ipv4_5tuple_host newkey;

		entry = ipv4_l3fwd_route_array[i];
		convert_ipv4_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *)&newkey);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		}
		ipv4_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
}
#define BIT_16_TO_23 0x00ff0000
static inline void
populate_ipv6_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;
	uint32_t array_len = sizeof(ipv6_l3fwd_route_array)/sizeof(ipv6_l3fwd_route_array[0]);

	mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
	mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
	for (i = 0; i < array_len; i++) {
		struct ipv6_l3fwd_route entry;
		union ipv6_5tuple_host newkey;

		entry = ipv6_l3fwd_route_array[i];
		convert_ipv6_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *)&newkey);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		}
		ipv6_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx32 " keys\n", array_len);
}
#define NUMBER_PORT_USED 4
static inline void
populate_ipv4_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);
	for (i = 0; i < nr_flow; i++) {
		struct ipv4_l3fwd_route entry;
		union ipv4_5tuple_host newkey;
		uint8_t a = (uint8_t)((i / NUMBER_PORT_USED) % BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)(((i / NUMBER_PORT_USED) / BYTE_VALUE_MAX) % BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)((i / NUMBER_PORT_USED) / (BYTE_VALUE_MAX * BYTE_VALUE_MAX));

		/* Create the ipv4 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv4_l3fwd_route_array[0];
			entry.key.ip_dst = IPv4(101, c, b, a);
			break;
		case 1:
			entry = ipv4_l3fwd_route_array[1];
			entry.key.ip_dst = IPv4(201, c, b, a);
			break;
		case 2:
			entry = ipv4_l3fwd_route_array[2];
			entry.key.ip_dst = IPv4(111, c, b, a);
			break;
		case 3:
			entry = ipv4_l3fwd_route_array[3];
			entry.key.ip_dst = IPv4(211, c, b, a);
			break;
		}
		convert_ipv4_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *)&newkey);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
		ipv4_l3fwd_out_if[ret] = (uint8_t)entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
static inline void
populate_ipv6_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask1 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_16_TO_23);
	mask2 = _mm_set_epi32(0, 0, ALL_32_BITS, ALL_32_BITS);
	for (i = 0; i < nr_flow; i++) {
		struct ipv6_l3fwd_route entry;
		union ipv6_5tuple_host newkey;
		uint8_t a = (uint8_t)((i / NUMBER_PORT_USED) % BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)(((i / NUMBER_PORT_USED) / BYTE_VALUE_MAX) % BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)((i / NUMBER_PORT_USED) / (BYTE_VALUE_MAX * BYTE_VALUE_MAX));

		/* Create the ipv6 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0: entry = ipv6_l3fwd_route_array[0]; break;
		case 1: entry = ipv6_l3fwd_route_array[1]; break;
		case 2: entry = ipv6_l3fwd_route_array[2]; break;
		case 3: entry = ipv6_l3fwd_route_array[3]; break;
		}
		entry.key.ip_dst[13] = c;
		entry.key.ip_dst[14] = b;
		entry.key.ip_dst[15] = a;
		convert_ipv6_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *)&newkey);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
		ipv6_l3fwd_out_if[ret] = (uint8_t)entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
2147 setup_hash(int socketid)
2149 struct rte_hash_parameters ipv4_l3fwd_hash_params = {
2151 .entries = L3FWD_HASH_ENTRIES,
2152 .bucket_entries = 4,
2153 .key_len = sizeof(union ipv4_5tuple_host),
2154 .hash_func = ipv4_hash_crc,
2155 .hash_func_init_val = 0,
2158 struct rte_hash_parameters ipv6_l3fwd_hash_params = {
2160 .entries = L3FWD_HASH_ENTRIES,
2161 .bucket_entries = 4,
2162 .key_len = sizeof(union ipv6_5tuple_host),
2163 .hash_func = ipv6_hash_crc,
2164 .hash_func_init_val = 0,

	/* create ipv4 hash */
	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
			"socket %d\n", socketid);

	/* create ipv6 hash */
	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
			"socket %d\n", socketid);

	if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) {
		/* For testing hash matching with a large number of flows we
		 * generate millions of IP 5-tuples with an incremented dst
		 * address to initialize the hash table. */

		/* populate the ipv4 hash */
		populate_ipv4_many_flow_into_table(
			ipv4_l3fwd_lookup_struct[socketid], hash_entry_number);

		/* populate the ipv6 hash */
		populate_ipv6_many_flow_into_table(
			ipv6_l3fwd_lookup_struct[socketid], hash_entry_number);
	} else {
		/* Use the static routes in the ipv4/ipv6 l3fwd route arrays
		 * directly to initialize the hash table. */

		/* populate the ipv4 hash */
		populate_ipv4_few_flow_into_table(ipv4_l3fwd_lookup_struct[socketid]);

		/* populate the ipv6 hash */
		populate_ipv6_few_flow_into_table(ipv6_l3fwd_lookup_struct[socketid]);
	}
}
#endif
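
/*
 * For reference, a minimal sketch of how the forwarding path consumes
 * the hash tables built above (simplified from the actual lookup code;
 * `key`, `portid` and `socketid` stand in for values the real loop
 * derives from the received packet and the current lcore). The lookup
 * returns the same index that the populate functions stored into
 * ipv4_l3fwd_out_if[] / ipv6_l3fwd_out_if[]:
 *
 *	union ipv4_5tuple_host key;
 *	... mask and copy the packet's 5-tuple into key ...
 *	int32_t ret = rte_hash_lookup(
 *		ipv4_l3fwd_lookup_struct[socketid], (const void *)&key);
 *	uint8_t dst_port = (ret < 0) ? portid : ipv4_l3fwd_out_if[ret];
 */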

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static void
setup_lpm(int socketid)
{
	struct rte_lpm6_config config;
	unsigned i;
	int ret;
	char s[64];

	/* create the LPM table */
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
		IPV4_L3FWD_LPM_MAX_RULES, 0);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
			" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {

		/* skip unused ports */
		if ((1 << ipv4_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route 0x%08x / %d (%d)\n",
			(unsigned)ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);
	}

	/* create the LPM6 table */
	snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
		&config);
	if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
			" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {

		/* skip unused ports */
		if ((1 << ipv6_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		ret = rte_lpm6_add(ipv6_l3fwd_lookup_struct[socketid],
			ipv6_l3fwd_route_array[i].ip,
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route %s / %d (%d)\n",
			"IPV6",
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);
	}
}
#endif
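
/*
 * For reference, a minimal sketch of the matching lookup (simplified
 * from the actual forwarding path; `ipv4_hdr` and `dst_port` stand in
 * for values the real loop derives from the received packet):
 * rte_lpm_lookup() performs the longest-prefix match and, on a hit,
 * returns the next-hop port that rte_lpm_add() stored above.
 *
 *	uint8_t next_hop;
 *	if (rte_lpm_lookup(ipv4_l3fwd_lookup_struct[socketid],
 *			rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)
 *		dst_port = next_hop;
 */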

static int
init_mem(unsigned nb_mbuf)
{
	struct lcore_conf *qconf;
	int socketid;
	unsigned lcore_id;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		}
		if (pktmbuf_pool[socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, nb_mbuf, MBUF_SIZE,
					MEMPOOL_CACHE_SIZE,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, 0);
			if (pktmbuf_pool[socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n",
					socketid);
			else
				printf("Allocated mbuf pool on socket %d\n",
					socketid);

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
			setup_lpm(socketid);
#else
			setup_hash(socketid);
#endif
		}
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid];
		qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid];
	}
	return 0;
}
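
/*
 * Note that mbuf pools and lookup structures are created once per NUMA
 * socket rather than per lcore: every lcore simply caches pointers to
 * its socket-local structures in its lcore_conf.
 */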

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

int
main(int argc, char **argv)
{
	struct lcore_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	int ret;
	unsigned nb_ports, lcore_id;
	uint16_t queueid;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid, nb_rx_queue, queue, socketid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
			(uint16_t)n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/*
		 * prepare dst and src MACs for each port.
		 */
		*(uint64_t *)(val_eth + portid) =
			ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
		ether_addr_copy(&ports_eth_addr[portid],
			(struct ether_addr *)(val_eth + portid) + 1);
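		/*
		 * val_eth[portid] now holds the first 12 bytes of the
		 * outgoing Ethernet header for this port: a locally
		 * administered destination MAC (02:00:00:00:00:<portid>)
		 * immediately followed by the port's own MAC as source,
		 * so the forwarding path can rewrite both addresses in
		 * a single copy.
		 */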

		/* init memory */
		ret = init_mem(NB_MBUF);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);

			rte_eth_dev_info_get(portid, &dev_info);
			txconf = &dev_info.default_txconf;
			if (port_conf.rxmode.jumbo_frame)
				txconf->txq_flags = 0;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						socketid, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		printf("\n");
	}

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);

		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					socketid,
					NULL,
					pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}

	printf("\n");

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}