/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "main.h"
#define APP_LOOKUP_EXACT_MATCH	0
#define APP_LOOKUP_LPM		1
#define DO_RFC_1812_CHECKS

#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD	APP_LOOKUP_LPM
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#include <rte_lpm6.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif
#define MAX_PKT_BURST 32

#include "rte_ip_frag.h"
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
                       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0],  addr[1],  addr[2],  addr[3], \
	addr[4],  addr[5],  addr[6],  addr[7], \
	addr[8],  addr[9],  addr[10], addr[11],\
	addr[12], addr[13], addr[14], addr[15]
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MAX_PORTS	RTE_MAX_ETHPORTS

/* Number of CPU sockets the per-socket lookup tables are sized for. */
#define NB_SOCKETS	8

#define MAX_JUMBO_PKT_LEN  9600

#define IPV6_ADDR_LEN 16

#define MEMPOOL_CACHE_SIZE 256

#define BUF_SIZE	2048
#define MBUF_SIZE \
	(BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define	MAX_FLOW_NUM	UINT16_MAX
#define	MIN_FLOW_NUM	1
#define	DEF_FLOW_NUM	0x1000

/* TTL numbers are in ms. */
#define	MAX_FLOW_TTL	(3600 * MS_PER_S)
#define	MIN_FLOW_TTL	1
#define	DEF_FLOW_TTL	MS_PER_S

#define	DEF_MBUF_NUM	0x400
/* Should be power of two. */
#define	IPV4_FRAG_TBL_BUCKET_ENTRIES	2

static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3
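/*
 * Note: PREFETCH_OFFSET sets the depth of the software pipeline in
 * main_loop(): while packet j is being forwarded, the cache line of
 * packet j + PREFETCH_OFFSET is already being fetched, hiding part of
 * the memory latency behind useful work.
 */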
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[MAX_PORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */
struct mbuf_table {
	uint32_t len;
	uint32_t head;
	uint32_t tail;
	struct rte_mbuf *m_table[0];
};
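/*
 * m_table is used as a circular queue: head is the next free slot and
 * tail the oldest queued packet. One slot is always left unused so that
 * head == tail unambiguously means "empty" (see send_single_packet(),
 * which drops the oldest entry once fill reaches len - 1).
 */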
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT MAX_PORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_free_thresh = 32,
};
static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));
struct ipv4_l3fwd_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
	{{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{{
		{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
		{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		 0x02, 0x1e, 0x67, 0xff, 0xfe, 0x0d, 0xb6, 0x0a},
		101, 11, IPPROTO_TCP}, 0},
};
typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];

#define L3FWD_HASH_ENTRIES	1024

#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct ipv4_l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

struct ipv6_l3fwd_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};
static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{IPv4(1,1,1,0), 24, 0},
	{IPv4(2,1,1,0), 24, 1},
	{IPv4(3,1,1,0), 24, 2},
	{IPv4(4,1,1,0), 24, 3},
	{IPv4(5,1,1,0), 24, 4},
	{IPv4(6,1,1,0), 24, 5},
	{IPv4(7,1,1,0), 24, 6},
	{IPv4(8,1,1,0), 24, 7},
};

static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

#define IPV4_L3FWD_LPM_MAX_RULES	1024
#define IPV6_L3FWD_LPM_MAX_RULES	1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S	(1 << 16)

typedef struct rte_lpm lookup_struct_t;
typedef struct rte_lpm6 lookup6_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup6_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#endif
struct tx_lcore_stat {
	uint64_t call;
	uint64_t drop;
	uint64_t queue;
	uint64_t send;
};

#ifdef IPV4_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v)	((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v)	do {} while (0)
#endif /* IPV4_FRAG_TBL_STAT */
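/*
 * With IPV4_FRAG_TBL_STAT undefined (the default), the stats macro expands
 * to an empty statement, so the counters in struct tx_lcore_stat cost
 * nothing on the fast path; defining it at build time turns the bookkeeping
 * on without touching any call site.
 */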
struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[MAX_PORTS];
	lookup_struct_t * ipv4_lookup_struct;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
	lookup6_struct_t * ipv6_lookup_struct;
#else
	lookup_struct_t * ipv6_lookup_struct;
#endif
	struct rte_ip_frag_tbl *frag_tbl[MAX_RX_QUEUE_PER_LCORE];
	struct rte_mempool *pool[MAX_RX_QUEUE_PER_LCORE];
	struct rte_ip_frag_death_row death_row;
	struct mbuf_table *tx_mbufs[MAX_PORTS];
	struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/*
 * If the number of queued packets reaches the given threshold, then
 * send a burst of packets on the output interface.
 */
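/*
 * fill is the current queue occupancy, computed from the head/tail
 * indices with wrap-around. Example: with len = 8, head = 2, tail = 6,
 * head - tail = -4, which is < 0 as int32_t, so fill = -4 + 8 = 4
 * packets waiting.
 */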
static inline uint32_t
send_burst(struct lcore_conf *qconf, uint32_t thresh, uint8_t port)
{
	uint32_t fill, len, k, n;
	struct mbuf_table *txmb;

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	if ((int32_t)(fill = txmb->head - txmb->tail) < 0)
		fill += len;

	if (fill >= thresh) {
		n = RTE_MIN(len - txmb->tail, fill);

		k = rte_eth_tx_burst(port, qconf->tx_queue_id[port],
			txmb->m_table + txmb->tail, (uint16_t)n);

		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1);
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, send, k);

		fill -= k;
		if ((txmb->tail += k) == len)
			txmb->tail = 0;
	}

	return fill;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t fill, lcore_id, len;
	struct lcore_conf *qconf;
	struct mbuf_table *txmb;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	fill = send_burst(qconf, MAX_PKT_BURST, port);
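	/*
	 * One slot is kept free to distinguish a full queue from an empty
	 * one, so fill == len - 1 means the queue is still full even after
	 * the drain attempt above: drop the oldest queued packet to make
	 * room for the new one.
	 */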
	if (fill == len - 1) {
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, drop, 1);
		rte_pktmbuf_free(txmb->m_table[txmb->tail]);
		if (++txmb->tail == len)
			txmb->tail = 0;
	}

	TX_LCORE_STAT_UPDATE(&qconf->tx_stat, queue, 1);
	txmb->m_table[txmb->head] = m;
	if (++txmb->head == len)
		txmb->head = 0;

	return 0;
}
#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/*
	 * 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/*
	 * 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or
	 * ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;

	/*
	 * 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/*
	 * 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct ipv4_hdr))
		return -5;

	return 0;
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
print_ipv4_key(struct ipv4_5tuple key)
{
	printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
		(unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
}

static void
print_ipv6_key(struct ipv6_5tuple key)
{
	printf("IP dst = " IPv6_BYTES_FMT ", IP src = " IPv6_BYTES_FMT ", "
		"port dst = %d, port src = %d, proto = %d\n",
		IPv6_BYTES(key.ip_dst), IPv6_BYTES(key.ip_src),
		key.port_dst, key.port_src, key.proto);
}
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	struct ipv4_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
	key.proto = ipv4_hdr->next_proto_id;

	switch (ipv4_hdr->next_proto_id) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
			sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
			sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
		break;
	}

	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
static inline uint8_t
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid, lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
	struct ipv6_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
	memcpy(key.ip_src, ipv6_hdr->src_addr, IPV6_ADDR_LEN);

	key.proto = ipv6_hdr->proto;

	switch (ipv6_hdr->proto) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *)ipv6_hdr +
			sizeof(struct ipv6_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *)ipv6_hdr +
			sizeof(struct ipv6_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
		break;
	}

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
		rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
		next_hop : portid);
}

static inline uint8_t
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid, lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
		ipv6_hdr->dst_addr, &next_hop) == 0) ?
		next_hop : portid);
}
#endif
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
	struct lcore_conf *qconf, uint64_t tms)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	void *d_addr_bytes;
	uint8_t dst_port;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (m->ol_flags & PKT_RX_IPV4_HDR) {
		/* Handle IPv4 headers.*/
		ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

#ifdef DO_RFC_1812_CHECKS
		/* Check to make sure the packet is valid (RFC1812) */
		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
			rte_pktmbuf_free(m);
			return;
		}

		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
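		/*
		 * The checksum increment pairs with the TTL decrement: TTL is
		 * the high byte of a big-endian 16-bit word in the header, so
		 * decrementing it lowers the one's-complement sum by 0x100 in
		 * network order, and bumping the raw (byte-swapped on
		 * little-endian CPUs) checksum field by one compensates. As
		 * in the original l3fwd code, the end-around carry case is
		 * not handled here.
		 */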
#endif

		/* if it is a fragmented packet, then try to reassemble. */
		if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) {
			struct rte_mbuf *mo;
			struct rte_ip_frag_tbl *tbl;
			struct rte_ip_frag_death_row *dr;

			tbl = qconf->frag_tbl[queue];
			dr = &qconf->death_row;
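			/*
			 * The death row collects mbufs that the fragmentation
			 * table wants freed (e.g. stale or overlapping
			 * fragments); they are released in batches in
			 * main_loop() via rte_ip_frag_free_death_row().
			 */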
			/* prepare mbuf: setup l2_len/l3_len. */
			m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
			m->pkt.vlan_macip.f.l3_len = sizeof(*ipv4_hdr);

			/* process this fragment. */
			if ((mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m,
					tms, ipv4_hdr)) == NULL)
				/* no packet to send out. */
				return;

			/* we have our packet reassembled. */
			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct ether_hdr *);
				ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
			}
		}
		dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
			qconf->ipv4_lookup_struct);
		if (dst_port >= MAX_PORTS ||
				(enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = 0x000000000002 +
			((uint64_t)dst_port << 40);
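		/*
		 * On a little-endian machine the 8-byte store above writes a
		 * locally administered MAC of the form 02:00:00:00:00:xx
		 * (xx = dst_port, placed in the 6th byte by the 40-bit
		 * shift). Being wider than the 6-byte d_addr, it also
		 * clobbers the first two bytes of s_addr, which the
		 * ether_addr_copy() below immediately rewrites.
		 */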
		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
	} else {
		/* Handle IPv6 headers.*/
		struct ipv6_hdr *ipv6_hdr;

		ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
			sizeof(struct ether_hdr));

		dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct);

		if (dst_port >= MAX_PORTS || (enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = 0x000000000002 +
			((uint64_t)dst_port << 40);

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
	}
}
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t diff_tsc, cur_tsc, prev_tsc;
	int i, j, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
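	/*
	 * drain_tsc converts BURST_TX_DRAIN_US to TSC ticks, rounding the
	 * per-microsecond rate up. E.g. on a 2.0 GHz TSC:
	 * ceil(2e9 / 1e6) * 100 = 200000 cycles between forced TX drains
	 * (~100 us).
	 */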
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < MAX_PORTS; portid++) {
				if ((enabled_port_mask & (1 << portid)) != 0)
					send_burst(qconf, 1, portid);
			}

			prev_tsc = cur_tsc;
		}
		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {

			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;

			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
					pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
					j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}
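			/*
			 * Free, in one pass, whatever the reassembly code put
			 * on the death row during this burst; the second
			 * argument is how many entries ahead to prefetch
			 * while walking the list.
			 */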
			rte_ip_frag_free_death_row(&qconf->death_row,
				PREFETCH_OFFSET);
		}
	}
}
static int
check_lcore_params(void)
{
	uint8_t queue, lcore;
	uint16_t i;
	int socketid;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("invalid queue number: %hhu\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
			return -1;
		}
		if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
				(numa_on == 0)) {
			printf("warning: lcore %hhu is on socket %d with numa off\n",
				lcore, socketid);
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P"
		" [--config (port,queue,lcore)[,(port,queue,lcore)]]"
		" [--enable-jumbo [--max-pkt-len PKTLEN]]"
		" [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -P : enable promiscuous mode\n"
		"  --config (port,queue,lcore): rx queues configuration\n"
		"  --no-numa: optional, disable numa awareness\n"
		"  --enable-jumbo: enable jumbo frames; the maximum packet"
		" length is PKTLEN in decimal (64-9600)\n"
		"  --maxflows=<flows>: optional, maximum number of flows "
		"supported\n"
		"  --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
		"flow\n",
		prgname);
}
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0 || *end != '\0')
		return -EINVAL;

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	static const char frmt_sec[] = "s";
	static const char frmt_msec[] = "ms";

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0)
		return -EINVAL;

	if (*end != '\0') {
		if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
			v *= MS_PER_S;
		else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
			return -EINVAL;
	}

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		if ((p0 = strchr(p, ')')) == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		rte_snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-numa", 0, 0, 0},
		{"enable-jumbo", 0, 0, 0},
		{"maxflows", 1, 0, 0},
		{"flowttl", 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:P",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;

		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name, "config", 6)) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}

			if (!strncmp(lgopts[option_index].name, "no-numa", 7)) {
				printf("numa is disabled\n");
				numa_on = 0;
			}

			if (!strncmp(lgopts[option_index].name,
					"maxflows", 8)) {
				if ((ret = parse_flow_num(optarg, MIN_FLOW_NUM,
						MAX_FLOW_NUM,
						&max_flow_num)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			if (!strncmp(lgopts[option_index].name, "flowttl", 7)) {
				if ((ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
						MAX_FLOW_TTL,
						&max_flow_ttl)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			if (!strncmp(lgopts[option_index].name, "enable-jumbo", 12)) {
				struct option lenopts = {
					"max-pkt-len", required_argument, 0, 0
				};

				printf("jumbo frame is enabled\n");
				port_conf.rxmode.jumbo_frame = 1;

				/*
				 * if no max-pkt-len set, use the default
				 * value ETHER_MAX_LEN
				 */
				if (0 == getopt_long(argc, argvopt, "",
						&lenopts, &option_index)) {
					ret = parse_max_pkt_len(optarg);
					if ((ret < 64) ||
							(ret > MAX_JUMBO_PKT_LEN)) {
						printf("invalid packet length\n");
						print_usage(prgname);
						return -1;
					}
					port_conf.rxmode.max_rx_pkt_len = ret;
				}
				printf("set jumbo frame max packet length to %u\n",
					(unsigned int)port_conf.rxmode.max_rx_pkt_len);
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
		eth_addr->addr_bytes[0],
		eth_addr->addr_bytes[1],
		eth_addr->addr_bytes[2],
		eth_addr->addr_bytes[3],
		eth_addr->addr_bytes[4],
		eth_addr->addr_bytes[5]);
}
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
setup_hash(int socketid)
{
	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.bucket_entries = 4,
		.key_len = sizeof(struct ipv4_5tuple),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
	};

	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.bucket_entries = 4,
		.key_len = sizeof(struct ipv6_5tuple),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
	};

	unsigned i;
	int ret;
	char s[64];

	/* create ipv4 hash */
	rte_snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
			"socket %d\n", socketid);

	/* create ipv6 hash */
	rte_snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
			"socket %d\n", socketid);
	/* populate the ipv4 hash */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(ipv4_l3fwd_lookup_struct[socketid],
			(void *)&ipv4_l3fwd_route_array[i].key);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		ipv4_l3fwd_out_if[ret] = ipv4_l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_ipv4_key(ipv4_l3fwd_route_array[i].key);
	}

	/* populate the ipv6 hash */
	for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(ipv6_l3fwd_lookup_struct[socketid],
			(void *)&ipv6_l3fwd_route_array[i].key);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		ipv6_l3fwd_out_if[ret] = ipv6_l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_ipv6_key(ipv6_l3fwd_route_array[i].key);
	}
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static void
setup_lpm(int socketid)
{
	struct rte_lpm6_config config;
	unsigned i;
	int ret;
	char s[64];

	/* create the LPM table */
	rte_snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
		IPV4_L3FWD_LPM_MAX_RULES, 0);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
			" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
		ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route 0x%08x / %d (%d)\n",
			(unsigned)ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);
	}
	/* create the LPM6 table */
	rte_snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
		&config);
	if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
			" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
		ret = rte_lpm6_add(ipv6_l3fwd_lookup_struct[socketid],
			ipv6_l3fwd_route_array[i].ip,
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route %s / %d (%d)\n",
			"IPV6",
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);
	}
}
#endif
static int
init_mem(void)
{
	struct lcore_conf *qconf;
	int socketid;
	unsigned lcore_id;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE,
				"Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		}

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
		setup_lpm(socketid);
#else
		setup_hash(socketid);
#endif
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid];
		qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid];
	}
	return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static void
setup_port_tbl(struct lcore_conf *qconf, uint32_t lcore, int socket,
	uint32_t port)
{
	struct mbuf_table *mtb;
	uint32_t n;
	size_t sz;

	n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
	sz = sizeof(*mtb) + sizeof(mtb->m_table[0]) * n;

	if ((mtb = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
			socket)) == NULL)
		rte_exit(EXIT_FAILURE, "%s() for lcore: %u, port: %u "
			"failed to allocate %zu bytes\n",
			__func__, lcore, port, sz);

	mtb->len = n;
	qconf->tx_mbufs[port] = mtb;
}
static void
setup_queue_tbl(struct lcore_conf *qconf, uint32_t lcore, int socket,
	uint32_t queue)
{
	uint32_t nb_mbuf;
	uint64_t frag_cycles;
	char buf[RTE_MEMPOOL_NAMESIZE];

	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
		max_flow_ttl;
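	/*
	 * frag_cycles converts the flow TTL (in ms) into TSC ticks, rounding
	 * the per-millisecond rate up. E.g. with a 2.0 GHz TSC and the
	 * default 1000 ms TTL: ceil(2e9 / 1000) * 1000 = 2e9 cycles before
	 * an incomplete flow is aged out of the fragment table.
	 */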
	if ((qconf->frag_tbl[queue] = rte_ip_frag_table_create(max_flow_num,
			IPV4_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
			socket)) == NULL)
		rte_exit(EXIT_FAILURE, "ipv4_frag_tbl_create(%u) on "
			"lcore: %u for queue: %u failed\n",
			max_flow_num, lcore, queue);

	/*
	 * At any given moment up to <max_flow_num * (MAX_FRAG_NUM - 1)>
	 * mbufs could be stored in the fragment table.
	 * Plus, each TX queue can hold up to <max_flow_num> packets.
	 */

	nb_mbuf = 2 * RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) *
		RTE_LIBRTE_IP_FRAG_MAX_FRAG;
	nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
	nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;

	nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)DEF_MBUF_NUM);
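	/*
	 * Rough sizing example with the defaults (max_flow_num = 0x1000,
	 * MAX_PKT_BURST = 32, standard 1518-byte frames fitting one
	 * 2048-byte buffer each) and RTE_LIBRTE_IP_FRAG_MAX_FRAG at its
	 * usual value of 4: 2 * 4096 * 4 = 32768 mbufs, times 1 buffer per
	 * frame, plus 128 RX + 512 TX descriptors = 33408 mbufs per queue's
	 * pool.
	 */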
	rte_snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

	if ((qconf->pool[queue] = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
			socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL)
		rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", buf);
}
static void
queue_dump_stat(void)
{
	uint32_t i, lcore;
	const struct lcore_conf *qconf;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (rte_lcore_is_enabled(lcore) == 0)
			continue;

		qconf = lcore_conf + lcore;
		for (i = 0; i < qconf->n_rx_queue; i++) {

			fprintf(stdout, " -- lcoreid=%u portid=%hhu "
				"rxqueueid=%hhu frag tbl stat:\n",
				lcore, qconf->rx_queue_list[i].port_id,
				qconf->rx_queue_list[i].queue_id);
			rte_ip_frag_table_statistics_dump(stdout, qconf->frag_tbl[i]);
			fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
				"TX packets queued:\t%" PRIu64 "\n"
				"TX packets dropped:\t%" PRIu64 "\n"
				"TX packets sent:\t%" PRIu64 "\n",
				qconf->tx_stat.call,
				qconf->tx_stat.queue,
				qconf->tx_stat.drop,
				qconf->tx_stat.send);
		}
	}
}
static void
signal_handler(int signum)
{
	queue_dump_stat();
	if (signum != SIGUSR1)
		rte_exit(0, "received signal: %d, exiting\n", signum);
}
int
MAIN(int argc, char **argv)
{
	struct lcore_conf *qconf;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcore_id;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid, nb_rx_queue, queue, socketid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	/* probe the PCI devices */
	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > MAX_PORTS)
		nb_ports = MAX_PORTS;

	if (check_port_config(nb_ports) < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
			(uint16_t)n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");
		/* init memory */
		ret = init_mem();
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
				socketid, &tx_conf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			setup_port_tbl(qconf, lcore_id, socketid, portid);
			queueid++;
		}
		printf("\n");
	}
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);
		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			setup_queue_tbl(qconf, lcore_id, socketid, queue);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
				socketid, &rx_conf, qconf->pool[queue]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}

	printf("\n");
	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	signal(SIGUSR1, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGINT, signal_handler);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
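	/*
	 * CALL_MASTER above makes the master lcore run main_loop() as well;
	 * the loop below then blocks until every slave lcore returns (they
	 * normally run forever, so in practice this waits until the process
	 * is terminated by a signal).
	 */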
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}