/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "main.h"
#include "ipv4_rsmbl.h"
#define APP_LOOKUP_EXACT_MATCH  0
#define APP_LOOKUP_LPM          1
#define DO_RFC_1812_CHECKS

#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD       APP_LOOKUP_LPM
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#include <rte_lpm6.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
                       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
    addr[0],  addr[1],  addr[2],  addr[3],  \
    addr[4],  addr[5],  addr[6],  addr[7],  \
    addr[8],  addr[9],  addr[10], addr[11], \
    addr[12], addr[13], addr[14], addr[15]
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MAX_PORTS   RTE_MAX_ETHPORTS

#define NB_SOCKETS  8

#define MAX_JUMBO_PKT_LEN  9600

#define IPV6_ADDR_LEN 16

#define MEMPOOL_CACHE_SIZE 256

#define BUF_SIZE    2048
#define MBUF_SIZE   \
    (BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
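
/*
 * Note: each mbuf holds up to BUF_SIZE (2KB) of packet data plus the
 * rte_mbuf metadata and RTE_PKTMBUF_HEADROOM bytes of headroom, so a
 * 9600-byte jumbo frame spans ceil(9600 / 2048) = 5 such buffers.
 */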
#define MAX_FLOW_NUM    UINT16_MAX
#define MIN_FLOW_NUM    1
#define DEF_FLOW_NUM    0x1000

/* TTL numbers are in ms. */
#define MAX_FLOW_TTL    (3600 * MS_PER_S)
#define MIN_FLOW_TTL    1
#define DEF_FLOW_TTL    MS_PER_S
#define DEF_MBUF_NUM    0x400

/* Should be power of two. */
#define IPV4_FRAG_TBL_BUCKET_ENTRIES    2
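
/*
 * Keeping the bucket size a power of two presumably lets the fragment
 * table in ipv4_rsmbl.h turn hash-to-bucket mapping into a bit mask
 * (hash & (n - 1)) rather than a modulo; see that header for the exact
 * requirement.
 */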
static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
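
/*
 * main_loop() uses PREFETCH_OFFSET as a software pipeline depth: while
 * packet j is being forwarded, packet j + PREFETCH_OFFSET is prefetched
 * into cache. E.g. with a burst of 8, packets 0-2 are prefetched up
 * front and each forwarding step then prefetches three packets ahead.
 */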
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[MAX_PORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */

struct mbuf_table {
    uint16_t len;
    struct rte_mbuf *m_table[MAX_PKT_BURST];
};
struct lcore_rx_queue {
    uint8_t port_id;
    uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT MAX_PORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
    uint8_t port_id;
    uint8_t queue_id;
    uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
/* default (port, queue, lcore) mapping, as in the stock l3fwd example */
static struct lcore_params lcore_params_array_default[] = {
    {0, 0, 2},
    {0, 1, 2},
    {0, 2, 2},
    {1, 0, 2},
    {1, 1, 2},
    {1, 2, 2},
    {2, 0, 2},
    {3, 0, 3},
    {3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
    sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
    .rxmode = {
        .max_rx_pkt_len = ETHER_MAX_LEN,
        .split_hdr_size = 0,
        .header_split   = 0, /**< Header Split disabled */
        .hw_ip_checksum = 1, /**< IP checksum offload enabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
        .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV6,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
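
/*
 * Note: the RSS hash above covers IP addresses only (ETH_RSS_IPV4 /
 * ETH_RSS_IPV6), not L4 ports, so every fragment of a datagram (which
 * may lack an L4 header) lands on the same RX queue; this is what lets
 * each RX queue keep an independent fragment table.
 */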
static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
    .rx_free_thresh = 32,
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0, /* Use PMD default values */
    .tx_rs_thresh = 0,   /* Use PMD default values */
};
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

struct ipv4_5tuple {
    uint32_t ip_dst;
    uint32_t ip_src;
    uint16_t port_dst;
    uint16_t port_src;
    uint8_t  proto;
} __attribute__((__packed__));

struct ipv6_5tuple {
    uint8_t  ip_dst[IPV6_ADDR_LEN];
    uint8_t  ip_src[IPV6_ADDR_LEN];
    uint16_t port_dst;
    uint16_t port_src;
    uint8_t  proto;
} __attribute__((__packed__));
struct ipv4_l3fwd_route {
    struct ipv4_5tuple key;
    uint8_t if_out;
};

struct ipv6_l3fwd_route {
    struct ipv6_5tuple key;
    uint8_t if_out;
};
static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
    {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
    {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
    {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
    {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
    {{
        {0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
        {0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x02, 0x1e, 0x67, 0xff, 0xfe, 0x0d, 0xb6, 0x0a},
        101, 11, IPPROTO_TCP,
    }, 0},
};
typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];

#define L3FWD_HASH_ENTRIES  1024

#define IPV4_L3FWD_NUM_ROUTES \
    (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV6_L3FWD_NUM_ROUTES \
    (sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct ipv4_l3fwd_route {
    uint32_t ip;
    uint8_t  depth;
    uint8_t  if_out;
};

struct ipv6_l3fwd_route {
    uint8_t ip[16];
    uint8_t depth;
    uint8_t if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
    {IPv4(1,1,1,0), 24, 0},
    {IPv4(2,1,1,0), 24, 1},
    {IPv4(3,1,1,0), 24, 2},
    {IPv4(4,1,1,0), 24, 3},
    {IPv4(5,1,1,0), 24, 4},
    {IPv4(6,1,1,0), 24, 5},
    {IPv4(7,1,1,0), 24, 6},
    {IPv4(8,1,1,0), 24, 7},
};
static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
    {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
    {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
    {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
    {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
    {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
    {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
    {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
    {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
#define IPV4_L3FWD_NUM_ROUTES \
    (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
#define IPV6_L3FWD_NUM_ROUTES \
    (sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

#define IPV4_L3FWD_LPM_MAX_RULES    1024
#define IPV6_L3FWD_LPM_MAX_RULES    1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)
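
/*
 * Note: for rte_lpm6, max_rules caps the number of routes, while
 * number_tbl8s bounds how many 8-bit sub-tables the trie may allocate
 * for prefixes longer than the first 24 bits; (1 << 16) is the value
 * the stock l3fwd example uses.
 */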
typedef struct rte_lpm  lookup_struct_t;
typedef struct rte_lpm6 lookup6_struct_t;
static lookup_struct_t  *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup6_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#endif
struct lcore_conf {
    uint16_t n_rx_queue;
    struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
    uint16_t tx_queue_id[MAX_PORTS];
    struct mbuf_table tx_mbufs[MAX_PORTS];
    lookup_struct_t *ipv4_lookup_struct;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
    lookup6_struct_t *ipv6_lookup_struct;
#else
    lookup_struct_t *ipv6_lookup_struct;
#endif
    struct ipv4_frag_tbl *frag_tbl[MAX_RX_QUEUE_PER_LCORE];
    struct rte_mempool *pool[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
    struct rte_mbuf **m_table;
    int ret;
    uint16_t queueid;

    queueid = qconf->tx_queue_id[port];
    m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

    ret = rte_eth_tx_burst(port, queueid, m_table, n);
    if (unlikely(ret < n)) {
        /* free the mbufs the PMD could not enqueue */
        do {
            rte_pktmbuf_free(m_table[ret]);
        } while (++ret < n);
    }

    return 0;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
    uint32_t lcore_id;
    uint16_t len;
    struct lcore_conf *qconf;

    lcore_id = rte_lcore_id();

    qconf = &lcore_conf[lcore_id];
    len = qconf->tx_mbufs[port].len;
    qconf->tx_mbufs[port].m_table[len] = m;
    len++;

    /* enough pkts to be sent */
    if (unlikely(len == MAX_PKT_BURST)) {
        send_burst(qconf, MAX_PKT_BURST, port);
        len = 0;
    }

    qconf->tx_mbufs[port].len = len;
    return 0;
}
#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
    /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
    /*
     * 1. The packet length reported by the Link Layer must be large
     * enough to hold the minimum length legal IP datagram (20 bytes).
     */
    if (link_len < sizeof(struct ipv4_hdr))
        return -1;

    /* 2. The IP checksum must be correct. */
    /* this is checked in H/W */

    /*
     * 3. The IP version number must be 4. If the version number is not 4
     * then the packet may be another version of IP, such as IPng or
     * ST-II.
     */
    if (((pkt->version_ihl) >> 4) != 4)
        return -3;

    /*
     * 4. The IP header length field must be large enough to hold the
     * minimum length legal IP datagram (20 bytes = 5 words).
     */
    if ((pkt->version_ihl & 0xf) < 5)
        return -4;

    /*
     * 5. The IP total length field must be large enough to hold the IP
     * datagram header, whose length is specified in the IP header length
     * field.
     */
    if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct ipv4_hdr))
        return -5;

    return 0;
}
#endif
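
/*
 * Worked example for the checks above: a normal 20-byte header has
 * version_ihl 0x45; the high nibble (4) is the IP version and the low
 * nibble (5) is the header length in 32-bit words, i.e. 5 * 4 = 20 bytes.
 */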
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
print_ipv4_key(struct ipv4_5tuple key)
{
    printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
        (unsigned)key.ip_dst, (unsigned)key.ip_src,
        key.port_dst, key.port_src, key.proto);
}

static void
print_ipv6_key(struct ipv6_5tuple key)
{
    printf("IP dst = " IPv6_BYTES_FMT ", IP src = " IPv6_BYTES_FMT ", "
        "port dst = %d, port src = %d, proto = %d\n",
        IPv6_BYTES(key.ip_dst), IPv6_BYTES(key.ip_src),
        key.port_dst, key.port_src, key.proto);
}
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
    lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
    struct ipv4_5tuple key;
    struct tcp_hdr *tcp;
    struct udp_hdr *udp;
    int ret = 0;

    key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
    key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
    key.proto = ipv4_hdr->next_proto_id;

    switch (ipv4_hdr->next_proto_id) {
    case IPPROTO_TCP:
        tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
            sizeof(struct ipv4_hdr));
        key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
        key.port_src = rte_be_to_cpu_16(tcp->src_port);
        break;

    case IPPROTO_UDP:
        udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
            sizeof(struct ipv4_hdr));
        key.port_dst = rte_be_to_cpu_16(udp->dst_port);
        key.port_src = rte_be_to_cpu_16(udp->src_port);
        break;

    default:
        key.port_dst = 0;
        key.port_src = 0;
        break;
    }

    /* Find destination port */
    ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
    return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
static inline uint8_t
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
    lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
    struct ipv6_5tuple key;
    struct tcp_hdr *tcp;
    struct udp_hdr *udp;
    int ret = 0;

    memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
    memcpy(key.ip_src, ipv6_hdr->src_addr, IPV6_ADDR_LEN);

    key.proto = ipv6_hdr->proto;

    switch (ipv6_hdr->proto) {
    case IPPROTO_TCP:
        tcp = (struct tcp_hdr *)((unsigned char *)ipv6_hdr +
            sizeof(struct ipv6_hdr));
        key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
        key.port_src = rte_be_to_cpu_16(tcp->src_port);
        break;

    case IPPROTO_UDP:
        udp = (struct udp_hdr *)((unsigned char *)ipv6_hdr +
            sizeof(struct ipv6_hdr));
        key.port_dst = rte_be_to_cpu_16(udp->dst_port);
        key.port_src = rte_be_to_cpu_16(udp->src_port);
        break;

    default:
        key.port_dst = 0;
        key.port_src = 0;
        break;
    }

    /* Find destination port */
    ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
    return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
    lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
    uint8_t next_hop;

    return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
        rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
        next_hop : portid);
}

static inline uint8_t
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
    lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
    uint8_t next_hop;

    return (uint8_t)((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
        ipv6_hdr->dst_addr, &next_hop) == 0) ?
        next_hop : portid);
}
#endif
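
/*
 * Example: with the LPM routes above, a packet destined to 2.1.1.7
 * matches 2.1.1.0/24 and resolves to next_hop 1, so it leaves on port 1
 * (provided that port is enabled); on a lookup miss the packet is sent
 * back out of the RX port.
 */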
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
    struct lcore_conf *qconf, uint64_t tms)
{
    struct ether_hdr *eth_hdr;
    struct ipv4_hdr *ipv4_hdr;
    void *d_addr_bytes;
    uint8_t dst_port;
    uint16_t flag_offset, ip_flag, ip_ofs;

    eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (m->ol_flags & PKT_RX_IPV4_HDR) {
        /* Handle IPv4 headers.*/
        ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

#ifdef DO_RFC_1812_CHECKS
        /* Check to make sure the packet is valid (RFC1812) */
        if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
            rte_pktmbuf_free(m);
            return;
        }
#endif
        /*
         * Update time to live and header checksum.
         * On a little-endian CPU, ++hdr_checksum raises the big-endian
         * checksum by 0x0100, offsetting the TTL decrement
         * (one's-complement carries are ignored, as in the stock
         * l3fwd example).
         */
        --(ipv4_hdr->time_to_live);
        ++(ipv4_hdr->hdr_checksum);

        flag_offset = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
        ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
        ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);

        /* if it is a fragmented packet, then try to reassemble. */
        if (ip_flag != 0 || ip_ofs != 0) {
            struct rte_mbuf *mo;
            struct ipv4_frag_tbl *tbl;

            tbl = qconf->frag_tbl[queue];

            /* prepare mbuf: setup l2_len/l3_len. */
            m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
            m->pkt.vlan_macip.f.l3_len = sizeof(*ipv4_hdr);

            /* process this fragment. */
            if ((mo = ipv4_frag_mbuf(tbl, m, tms, ipv4_hdr,
                    ip_ofs, ip_flag)) == NULL)
                /* no packet to send out. */
                return;

            /* we have our packet reassembled. */
            if (mo != m) {
                m = mo;
                eth_hdr = rte_pktmbuf_mtod(m,
                    struct ether_hdr *);
                ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
            }
        }

        dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
            qconf->ipv4_lookup_struct);
        if (dst_port >= MAX_PORTS ||
                (enabled_port_mask & 1 << dst_port) == 0)
            dst_port = portid;

        /* 02:00:00:00:00:xx */
        d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
        *((uint64_t *)d_addr_bytes) = 0x000000000002 +
            ((uint64_t)dst_port << 40);

        /* src addr */
        ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

        send_single_packet(m, dst_port);
    } else {
        /* Handle IPv6 headers.*/
        struct ipv6_hdr *ipv6_hdr;

        ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
            sizeof(struct ether_hdr));

        dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
            qconf->ipv6_lookup_struct);

        if (dst_port >= MAX_PORTS ||
                (enabled_port_mask & 1 << dst_port) == 0)
            dst_port = portid;

        /* 02:00:00:00:00:xx */
        d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
        *((uint64_t *)d_addr_bytes) = 0x000000000002 +
            ((uint64_t)dst_port << 40);

        /* src addr */
        ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

        send_single_packet(m, dst_port);
    }
}
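
/*
 * Note on the destination MAC rewrite above: writing the 64-bit value
 * 0x000000000002 + (dst_port << 40) through d_addr_bytes stores the
 * bytes 02:00:00:00:00:xx on a little-endian CPU, i.e. a locally
 * administered Ethernet address whose last octet is the output port id.
 */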
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    unsigned lcore_id;
    uint64_t diff_tsc, cur_tsc, prev_tsc;
    int i, j, nb_rx;
    uint8_t portid, queueid;
    struct lcore_conf *qconf;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

    prev_tsc = 0;
    lcore_id = rte_lcore_id();
    qconf = &lcore_conf[lcore_id];

    if (qconf->n_rx_queue == 0) {
        RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
        return 0;
    }

    RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

    for (i = 0; i < qconf->n_rx_queue; i++) {
        portid = qconf->rx_queue_list[i].port_id;
        queueid = qconf->rx_queue_list[i].queue_id;
        RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
            lcore_id, portid, queueid);
    }

    while (1) {
        cur_tsc = rte_rdtsc();

        /* TX burst queue drain */
        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            /*
             * This could be optimized (use queueid instead of
             * portid), but it is not called so often
             */
            for (portid = 0; portid < MAX_PORTS; portid++) {
                if (qconf->tx_mbufs[portid].len == 0)
                    continue;
                send_burst(&lcore_conf[lcore_id],
                    qconf->tx_mbufs[portid].len,
                    portid);
                qconf->tx_mbufs[portid].len = 0;
            }
            prev_tsc = cur_tsc;
        }

        /* Read packet from RX queues */
        for (i = 0; i < qconf->n_rx_queue; ++i) {
            portid = qconf->rx_queue_list[i].port_id;
            queueid = qconf->rx_queue_list[i].queue_id;

            nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
                MAX_PKT_BURST);

            /* Prefetch first packets */
            for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
                rte_prefetch0(rte_pktmbuf_mtod(
                    pkts_burst[j], void *));
            }

            /* Prefetch and forward already prefetched packets */
            for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
                rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
                    j + PREFETCH_OFFSET], void *));
                l3fwd_simple_forward(pkts_burst[j], portid,
                    i, qconf, cur_tsc);
            }

            /* Forward remaining prefetched packets */
            for (; j < nb_rx; j++) {
                l3fwd_simple_forward(pkts_burst[j], portid,
                    i, qconf, cur_tsc);
            }
        }
    }
}
static int
check_lcore_params(void)
{
    uint8_t queue, lcore;
    uint16_t i;
    int socketid;

    for (i = 0; i < nb_lcore_params; ++i) {
        queue = lcore_params[i].queue_id;
        if (queue >= MAX_RX_QUEUE_PER_PORT) {
            printf("invalid queue number: %hhu\n", queue);
            return -1;
        }
        lcore = lcore_params[i].lcore_id;
        if (!rte_lcore_is_enabled(lcore)) {
            printf("error: lcore %hhu is not enabled in lcore mask\n",
                lcore);
            return -1;
        }
        /* note the parentheses: assign first, then compare */
        if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
                (numa_on == 0)) {
            printf("warning: lcore %hhu is on socket %d with numa off\n",
                lcore, socketid);
        }
    }
    return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
    unsigned portid;
    uint16_t i;

    for (i = 0; i < nb_lcore_params; ++i) {
        portid = lcore_params[i].port_id;
        if ((enabled_port_mask & (1 << portid)) == 0) {
            printf("port %u is not enabled in port mask\n", portid);
            return -1;
        }
        if (portid >= nb_ports) {
            printf("port %u is not present on the board\n", portid);
            return -1;
        }
    }
    return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
    int queue = -1;
    uint16_t i;

    for (i = 0; i < nb_lcore_params; ++i) {
        if (lcore_params[i].port_id == port &&
                lcore_params[i].queue_id > queue)
            queue = lcore_params[i].queue_id;
    }
    return (uint8_t)(++queue);
}
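
/*
 * Example: with --config="(0,0,1),(0,1,2)" the highest queue id seen for
 * port 0 is 1, so get_port_n_rx_queues(0) returns 2 and the port is
 * configured with two RX queues.
 */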
static int
init_lcore_rx_queues(void)
{
    uint16_t i, nb_rx_queue;
    uint8_t lcore;

    for (i = 0; i < nb_lcore_params; ++i) {
        lcore = lcore_params[i].lcore_id;
        nb_rx_queue = lcore_conf[lcore].n_rx_queue;
        if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
            printf("error: too many queues (%u) for lcore: %u\n",
                (unsigned)nb_rx_queue + 1, (unsigned)lcore);
            return -1;
        }
        lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
            lcore_params[i].port_id;
        lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
            lcore_params[i].queue_id;
        lcore_conf[lcore].n_rx_queue++;
    }
    return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
    printf("%s [EAL options] -- -p PORTMASK -P"
        " [--config (port,queue,lcore)[,(port,queue,lcore)]]"
        " [--enable-jumbo [--max-pkt-len PKTLEN]]"
        " [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
        " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
        " -P: enable promiscuous mode\n"
        " --config (port,queue,lcore): rx queues configuration\n"
        " --no-numa: optional, disable numa awareness\n"
        " --enable-jumbo: enable jumbo frames; the maximum packet"
        " length is PKTLEN in decimal (64-9600)\n"
        " --maxflows=<flows>: optional, maximum number of flows "
        "supported\n"
        " --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
        "flow\n",
        prgname);
}
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
    char *end;
    uint64_t v;

    /* parse decimal string */
    errno = 0;
    v = strtoul(str, &end, 10);
    if (errno != 0 || *end != '\0')
        return -EINVAL;

    if (v < min || v > max)
        return -EINVAL;

    *val = (uint32_t)v;
    return 0;
}
static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
    char *end;
    uint64_t v;

    static const char frmt_sec[] = "s";
    static const char frmt_msec[] = "ms";

    /* parse decimal string */
    errno = 0;
    v = strtoul(str, &end, 10);
    if (errno != 0)
        return -EINVAL;

    if (*end != '\0') {
        if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
            v *= MS_PER_S;
        else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
            return -EINVAL;
    }

    if (v < min || v > max)
        return -EINVAL;

    *val = (uint32_t)v;
    return 0;
}
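
/*
 * Examples: "1000" and "1000ms" both yield 1000 ms, while "10s" yields
 * 10 * MS_PER_S = 10000 ms; any other suffix is rejected.
 */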
static int
parse_max_pkt_len(const char *pktlen)
{
    char *end = NULL;
    unsigned long len;

    /* parse decimal string */
    len = strtoul(pktlen, &end, 10);
    if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
        return -1;
    if (len == 0)
        return -1;

    return len;
}
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
        return -1;
    if (pm == 0)
        return -1;

    return pm;
}
static int
parse_config(const char *q_arg)
{
    char s[256];
    const char *p, *p0 = q_arg;
    char *end;
    enum fieldnames {
        FLD_PORT = 0,
        FLD_QUEUE,
        FLD_LCORE,
        _NUM_FLD
    };
    unsigned long int_fld[_NUM_FLD];
    char *str_fld[_NUM_FLD];
    int i;
    unsigned size;

    nb_lcore_params = 0;

    while ((p = strchr(p0, '(')) != NULL) {
        ++p;
        if ((p0 = strchr(p, ')')) == NULL)
            return -1;

        size = p0 - p;
        if (size >= sizeof(s))
            return -1;

        rte_snprintf(s, sizeof(s), "%.*s", size, p);
        if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
            return -1;
        for (i = 0; i < _NUM_FLD; i++) {
            errno = 0;
            int_fld[i] = strtoul(str_fld[i], &end, 0);
            if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
                return -1;
        }
        if (nb_lcore_params >= MAX_LCORE_PARAMS) {
            printf("exceeded max number of lcore params: %hu\n",
                nb_lcore_params);
            return -1;
        }
        lcore_params_array[nb_lcore_params].port_id =
            (uint8_t)int_fld[FLD_PORT];
        lcore_params_array[nb_lcore_params].queue_id =
            (uint8_t)int_fld[FLD_QUEUE];
        lcore_params_array[nb_lcore_params].lcore_id =
            (uint8_t)int_fld[FLD_LCORE];
        ++nb_lcore_params;
    }
    lcore_params = lcore_params_array;
    return 0;
}
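
/*
 * Example: --config="(0,0,1),(0,1,2)" assigns port 0 / queue 0 to lcore 1
 * and port 0 / queue 1 to lcore 2, replacing lcore_params_array_default.
 */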
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
    int opt, ret;
    char **argvopt;
    int option_index;
    char *prgname = argv[0];
    static struct option lgopts[] = {
        {"config", 1, 0, 0},
        {"no-numa", 0, 0, 0},
        {"enable-jumbo", 0, 0, 0},
        {"maxflows", 1, 0, 0},
        {"flowttl", 1, 0, 0},
        {NULL, 0, 0, 0}
    };

    argvopt = argv;

    while ((opt = getopt_long(argc, argvopt, "p:P",
            lgopts, &option_index)) != EOF) {
        switch (opt) {
        /* portmask */
        case 'p':
            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                printf("invalid portmask\n");
                print_usage(prgname);
                return -1;
            }
            break;
        case 'P':
            printf("Promiscuous mode selected\n");
            promiscuous_on = 1;
            break;

        /* long options */
        case 0:
            if (!strncmp(lgopts[option_index].name, "config", 6)) {
                ret = parse_config(optarg);
                if (ret) {
                    printf("invalid config\n");
                    print_usage(prgname);
                    return -1;
                }
            }
            if (!strncmp(lgopts[option_index].name, "no-numa", 7)) {
                printf("numa is disabled\n");
                numa_on = 0;
            }
            if (!strncmp(lgopts[option_index].name, "maxflows", 8)) {
                if ((ret = parse_flow_num(optarg, MIN_FLOW_NUM,
                        MAX_FLOW_NUM, &max_flow_num)) != 0) {
                    printf("invalid value: \"%s\" for "
                        "parameter %s\n", optarg,
                        lgopts[option_index].name);
                    print_usage(prgname);
                    return ret;
                }
            }
            if (!strncmp(lgopts[option_index].name, "flowttl", 7)) {
                if ((ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
                        MAX_FLOW_TTL, &max_flow_ttl)) != 0) {
                    printf("invalid value: \"%s\" for "
                        "parameter %s\n", optarg,
                        lgopts[option_index].name);
                    print_usage(prgname);
                    return ret;
                }
            }
            if (!strncmp(lgopts[option_index].name, "enable-jumbo", 12)) {
                struct option lenopts =
                    {"max-pkt-len", required_argument, 0, 0};

                printf("jumbo frame is enabled\n");
                port_conf.rxmode.jumbo_frame = 1;

                /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
                if (0 == getopt_long(argc, argvopt, "",
                        &lenopts, &option_index)) {
                    ret = parse_max_pkt_len(optarg);
                    if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)) {
                        printf("invalid packet length\n");
                        print_usage(prgname);
                        return -1;
                    }
                    port_conf.rxmode.max_rx_pkt_len = ret;
                }
                printf("set jumbo frame max packet length to %u\n",
                    (unsigned int)port_conf.rxmode.max_rx_pkt_len);
            }
            break;

        default:
            print_usage(prgname);
            return -1;
        }
    }

    if (optind >= 0)
        argv[optind-1] = prgname;

    ret = optind-1;
    optind = 0; /* reset getopt lib */
    return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
    printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
        eth_addr->addr_bytes[0],
        eth_addr->addr_bytes[1],
        eth_addr->addr_bytes[2],
        eth_addr->addr_bytes[3],
        eth_addr->addr_bytes[4],
        eth_addr->addr_bytes[5]);
}
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
setup_hash(int socketid)
{
    struct rte_hash_parameters ipv4_l3fwd_hash_params = {
        .name = NULL,
        .entries = L3FWD_HASH_ENTRIES,
        .bucket_entries = 4,
        .key_len = sizeof(struct ipv4_5tuple),
        .hash_func = DEFAULT_HASH_FUNC,
        .hash_func_init_val = 0,
    };

    struct rte_hash_parameters ipv6_l3fwd_hash_params = {
        .name = NULL,
        .entries = L3FWD_HASH_ENTRIES,
        .bucket_entries = 4,
        .key_len = sizeof(struct ipv6_5tuple),
        .hash_func = DEFAULT_HASH_FUNC,
        .hash_func_init_val = 0,
    };

    unsigned i;
    int ret;
    char s[64];

    /* create ipv4 hash */
    rte_snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
    ipv4_l3fwd_hash_params.name = s;
    ipv4_l3fwd_hash_params.socket_id = socketid;
    ipv4_l3fwd_lookup_struct[socketid] =
        rte_hash_create(&ipv4_l3fwd_hash_params);
    if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
        rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
            "socket %d\n", socketid);

    /* create ipv6 hash */
    rte_snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
    ipv6_l3fwd_hash_params.name = s;
    ipv6_l3fwd_hash_params.socket_id = socketid;
    ipv6_l3fwd_lookup_struct[socketid] =
        rte_hash_create(&ipv6_l3fwd_hash_params);
    if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
        rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
            "socket %d\n", socketid);

    /* populate the ipv4 hash */
    for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
        ret = rte_hash_add_key(ipv4_l3fwd_lookup_struct[socketid],
            (void *)&ipv4_l3fwd_route_array[i].key);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
                "l3fwd hash on socket %d\n", i, socketid);
        ipv4_l3fwd_out_if[ret] = ipv4_l3fwd_route_array[i].if_out;
        printf("Hash: Adding key\n");
        print_ipv4_key(ipv4_l3fwd_route_array[i].key);
    }

    /* populate the ipv6 hash */
    for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
        ret = rte_hash_add_key(ipv6_l3fwd_lookup_struct[socketid],
            (void *)&ipv6_l3fwd_route_array[i].key);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
                "l3fwd hash on socket %d\n", i, socketid);
        ipv6_l3fwd_out_if[ret] = ipv6_l3fwd_route_array[i].if_out;
        printf("Hash: Adding key\n");
        print_ipv6_key(ipv6_l3fwd_route_array[i].key);
    }
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static void
setup_lpm(int socketid)
{
    struct rte_lpm6_config config;
    unsigned i;
    int ret;
    char s[64];

    /* create the LPM table */
    rte_snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
    ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
        IPV4_L3FWD_LPM_MAX_RULES, 0);
    if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
        rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
            " on socket %d\n", socketid);

    /* populate the LPM table */
    for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
        ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
            ipv4_l3fwd_route_array[i].ip,
            ipv4_l3fwd_route_array[i].depth,
            ipv4_l3fwd_route_array[i].if_out);

        if (ret < 0) {
            rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
                "l3fwd LPM table on socket %d\n",
                i, socketid);
        }

        printf("LPM: Adding route 0x%08x / %d (%d)\n",
            (unsigned)ipv4_l3fwd_route_array[i].ip,
            ipv4_l3fwd_route_array[i].depth,
            ipv4_l3fwd_route_array[i].if_out);
    }

    /* create the LPM6 table */
    rte_snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

    config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
    config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
    config.flags = 0;
    ipv6_l3fwd_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
        &config);
    if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
        rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
            " on socket %d\n", socketid);

    /* populate the LPM table */
    for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
        ret = rte_lpm6_add(ipv6_l3fwd_lookup_struct[socketid],
            ipv6_l3fwd_route_array[i].ip,
            ipv6_l3fwd_route_array[i].depth,
            ipv6_l3fwd_route_array[i].if_out);

        if (ret < 0) {
            rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
                "l3fwd LPM table on socket %d\n",
                i, socketid);
        }

        printf("LPM: Adding route %s / %d (%d)\n",
            "IPV6",
            ipv6_l3fwd_route_array[i].depth,
            ipv6_l3fwd_route_array[i].if_out);
    }
}
#endif
static int
init_mem(void)
{
    struct lcore_conf *qconf;
    int socketid;
    unsigned lcore_id;

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        if (numa_on)
            socketid = rte_lcore_to_socket_id(lcore_id);
        else
            socketid = 0;

        if (socketid >= NB_SOCKETS) {
            rte_exit(EXIT_FAILURE,
                "Socket %d of lcore %u is out of range %d\n",
                socketid, lcore_id, NB_SOCKETS);
        }
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
        setup_lpm(socketid);
#else
        setup_hash(socketid);
#endif
        qconf = &lcore_conf[lcore_id];
        qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid];
        qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid];
    }
    return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90  /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status");
    fflush(stdout);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        for (portid = 0; portid < port_num; portid++) {
            if ((port_mask & (1 << portid)) == 0)
                continue;
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port %d Link Up - speed %u "
                        "Mbps - %s\n", (uint8_t)portid,
                        (unsigned)link.link_speed,
                        (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
                else
                    printf("Port %d Link Down\n",
                        (uint8_t)portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == 0) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            fflush(stdout);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
}
static void
setup_queue_frag_tbl(struct lcore_conf *qconf, uint32_t lcore, int socket,
    uint32_t queue)
{
    uint32_t nb_mbuf;
    uint64_t frag_cycles;
    char buf[RTE_MEMPOOL_NAMESIZE];

    frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
        max_flow_ttl;

    if ((qconf->frag_tbl[queue] = ipv4_frag_tbl_create(max_flow_num,
            IPV4_FRAG_TBL_BUCKET_ENTRIES, max_flow_num,
            frag_cycles, socket)) == NULL)
        rte_exit(EXIT_FAILURE, "ipv4_frag_tbl_create(%u) on "
            "lcore: %u for queue: %u failed\n",
            max_flow_num, lcore, queue);

    /*
     * Make the mbuf pool large enough to hold all fragments of all
     * flows plus the RX/TX descriptor rings and one burst.
     */
    nb_mbuf = max_flow_num * MAX_FRAG_NUM;
    nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
    nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + MAX_PKT_BURST +
        RTE_TEST_TX_DESC_DEFAULT;
    nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)DEF_MBUF_NUM);

    rte_snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

    if ((qconf->pool[queue] = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
            sizeof(struct rte_pktmbuf_pool_private),
            rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
            socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL)
        rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", buf);
}
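
/*
 * Sizing example (MAX_FRAG_NUM comes from ipv4_rsmbl.h; 4 is assumed here
 * purely for illustration): with the default max_flow_num of 0x1000 and a
 * 1518-byte max_rx_pkt_len, nb_mbuf = 4096 * 4 * 1 + 128 + 32 + 512 =
 * 17056 mbufs, well above the DEF_MBUF_NUM floor of 0x400.
 */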
static void
queue_frag_tbl_dump_stat(void)
{
    uint32_t i, lcore;
    const struct lcore_conf *qconf;

    for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
        if (rte_lcore_is_enabled(lcore) == 0)
            continue;

        qconf = lcore_conf + lcore;
        for (i = 0; i < qconf->n_rx_queue; i++) {
            fprintf(stdout, " -- lcoreid=%u portid=%hhu "
                "rxqueueid=%hhu frag tbl stat:\n",
                lcore, qconf->rx_queue_list[i].port_id,
                qconf->rx_queue_list[i].queue_id);
            ipv4_frag_tbl_dump_stat(stdout, qconf->frag_tbl[i]);
        }
    }
}
/* dump stats on SIGUSR1; on any other signal, dump and exit */
static void
signal_handler(int signum)
{
    queue_frag_tbl_dump_stat();
    if (signum != SIGUSR1)
        rte_exit(0, "received signal: %d, exiting\n", signum);
}
int
MAIN(int argc, char **argv)
{
    struct lcore_conf *qconf;
    int ret;
    unsigned nb_ports;
    uint16_t queueid;
    unsigned lcore_id;
    uint32_t n_tx_queue, nb_lcores;
    uint8_t portid, nb_rx_queue, queue, socketid;

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
    argc -= ret;
    argv += ret;

    /* parse application arguments (after the EAL ones) */
    ret = parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

    if (check_lcore_params() < 0)
        rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

    ret = init_lcore_rx_queues();
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

    /* init driver(s) */
    if (rte_pmd_init_all() < 0)
        rte_exit(EXIT_FAILURE, "Cannot init pmd\n");

    if (rte_eal_pci_probe() < 0)
        rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

    nb_ports = rte_eth_dev_count();
    if (nb_ports > MAX_PORTS)
        nb_ports = MAX_PORTS;

    if (check_port_config(nb_ports) < 0)
        rte_exit(EXIT_FAILURE, "check_port_config failed\n");

    nb_lcores = rte_lcore_count();

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            printf("\nSkipping disabled port %d\n", portid);
            continue;
        }

        /* init port */
        printf("Initializing port %d ... ", portid);
        fflush(stdout);

        nb_rx_queue = get_port_n_rx_queues(portid);
        n_tx_queue = nb_lcores;
        if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
            n_tx_queue = MAX_TX_QUEUE_PER_PORT;
        printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
            nb_rx_queue, (unsigned)n_tx_queue);
        ret = rte_eth_dev_configure(portid, nb_rx_queue,
            (uint16_t)n_tx_queue, &port_conf);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
                ret, portid);

        rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
        print_ethaddr(" Address:", &ports_eth_addr[portid]);
        printf(", ");

        /* init memory */
        ret = init_mem();
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "init_mem failed\n");

        /* init one TX queue per couple (lcore,port) */
        queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
            if (rte_lcore_is_enabled(lcore_id) == 0)
                continue;

            if (numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
            fflush(stdout);
            ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
                socketid, &tx_conf);
            if (ret < 0)
                rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
                    "port=%d\n", ret, portid);

            qconf = &lcore_conf[lcore_id];
            qconf->tx_queue_id[portid] = queueid;
            queueid++;
        }
        printf("\n");
    }

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;
        qconf = &lcore_conf[lcore_id];
        printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
        fflush(stdout);
        /* init RX queues */
        for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
            portid = qconf->rx_queue_list[queue].port_id;
            queueid = qconf->rx_queue_list[queue].queue_id;

            if (numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("rxq=%d,%d,%d ", portid, queueid, socketid);
            fflush(stdout);

            setup_queue_frag_tbl(qconf, lcore_id, socketid, queue);

            ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
                socketid, &rx_conf, qconf->pool[queue]);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                    "rte_eth_rx_queue_setup: err=%d, "
                    "port=%d\n", ret, portid);
        }
    }

    printf("\n");

    /* start ports */
    for (portid = 0; portid < nb_ports; portid++) {
        if ((enabled_port_mask & (1 << portid)) == 0)
            continue;

        /* Start device */
        ret = rte_eth_dev_start(portid);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
                ret, portid);

        /*
         * If enabled, put device in promiscuous mode.
         * This allows IO forwarding mode to forward packets
         * to itself through 2 cross-connected ports of the
         * target machine.
         */
        if (promiscuous_on)
            rte_eth_promiscuous_enable(portid);
    }

    check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

    signal(SIGUSR1, signal_handler);
    signal(SIGTERM, signal_handler);
    signal(SIGINT, signal_handler);

    /* launch per-lcore init on every lcore */
    rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    return 0;
}