/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_timer.h>
#include <rte_keepalive.h>
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

#define NB_MBUF 8192

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
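
/*
 * Note: nb_rxd/nb_txd are only starting values; rte_eth_dev_adjust_nb_rx_tx_desc()
 * in main() may adjust them to the limits of the actual device.
 */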
/* ethernet addresses of ports */
static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t l2fwd_enabled_port_mask;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];

static unsigned int l2fwd_rx_queue_per_lcore = 1;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	unsigned n_rx_port;
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
struct rte_mempool *l2fwd_pktmbuf_pool = NULL;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
	uint64_t tx;
	uint64_t rx;
	uint64_t dropped;
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 1
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */
static int64_t check_period = 5; /* default check cycle is 5ms */
/* Keepalive structure */
struct rte_keepalive *rte_global_keepalive_info;

/* Termination signalling */
static int terminate_signal_received;

/* Termination signal handler */
static void handle_sigterm(__rte_unused int value)
{
	terminate_signal_received = 1;
}
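
/*
 * Keepalive flow in this example: every forwarding lcore calls
 * rte_keepalive_mark_alive() from its packet loop, while a timer on the
 * master lcore runs rte_keepalive_dispatch_pings() every check_period
 * milliseconds. A core that stops marking itself alive is reported via the
 * dead_core() callback below, which relaunches it once its EAL state is
 * FINISHED; state changes are also relayed to shared memory by
 * relay_core_state().
 */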
/* Print out statistics on packets dropped */
static void
print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
	__attribute__((unused)) void *ptr_data)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	unsigned portid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		/* skip disabled ports */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;
		printf("\nStatistics for port %u ------------------------------"
			"\nPackets sent: %24"PRIu64
			"\nPackets received: %20"PRIu64
			"\nPackets dropped: %21"PRIu64,
			portid,
			port_statistics[portid].tx,
			port_statistics[portid].rx,
			port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nAggregate statistics ==============================="
		"\nTotal packets sent: %18"PRIu64
		"\nTotal packets received: %14"PRIu64
		"\nTotal packets dropped: %15"PRIu64,
		total_packets_tx,
		total_packets_rx,
		total_packets_dropped);
	printf("\n====================================================\n");
}
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct ether_hdr *eth;
	void *tmp;
	int sent;
	unsigned dst_port;
	struct rte_eth_dev_tx_buffer *buffer;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* Rewrite destination MAC to 02:00:00:00:00:xx (xx = dst port) */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* Source MAC becomes the TX port's own address */
	ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);

	buffer = tx_buffer[dst_port];
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		port_statistics[dst_port].tx += sent;
}
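
/*
 * Note: rte_eth_tx_buffer() only enqueues the mbuf into the per-port TX
 * buffer; its return value is non-zero only when the buffer filled up and
 * was transmitted. Anything still pending is flushed by the periodic
 * rte_eth_tx_buffer_flush() call in l2fwd_main_loop() below.
 */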
/* main processing loop */
static void
l2fwd_main_loop(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;
	int sent;
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
		/ US_PER_S * BURST_TX_DRAIN_US;
	struct rte_eth_dev_tx_buffer *buffer;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_port == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		return;
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_port; i++) {
		portid = qconf->rx_port_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	uint64_t tsc_initial = rte_rdtsc();
	uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();
	while (!terminate_signal_received) {
		/* Keepalive heartbeat */
		rte_keepalive_mark_alive(rte_global_keepalive_info);

		cur_tsc = rte_rdtsc();

		/*
		 * Die randomly within 7 secs for demo purposes if
		 * keepalive checking is enabled
		 */
		if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime)
			break;

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (i = 0; i < qconf->n_rx_port; i++) {
				portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
				buffer = tx_buffer[portid];
				sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
				if (sent)
					port_statistics[portid].tx += sent;
			}
			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_port; i++) {
			portid = qconf->rx_port_list[i];
			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
						pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_simple_forward(m, portid);
			}
		}
	}
}
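
/*
 * When a worker breaks out of the loop above (the deliberate "random death"
 * used for demonstration), it stops calling rte_keepalive_mark_alive(). The
 * master's dispatch timer then marks the core dead and dead_core()
 * relaunches l2fwd_launch_one_lcore() on it.
 */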
static int
l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
{
	l2fwd_main_loop();
	return 0;
}
/* display usage */
static void
l2fwd_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
		"  -K PERIOD: Keepalive check period in ms (5 default; 86400 max)\n"
		"  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
		prgname);
}
static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (pm == 0)
		return -1;
	return pm;
}

static unsigned int
l2fwd_parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;
	if (n == 0)
		return 0;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return 0;
	return n;
}

static int
l2fwd_parse_timer_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;
	return n;
}

static int
l2fwd_parse_check_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;
	return n;
}
/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:T:K:",
			lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
			if (l2fwd_enabled_port_mask == 0) {
				printf("invalid portmask\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;
		/* nqueue */
		case 'q':
			l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
			if (l2fwd_rx_queue_per_lcore == 0) {
				printf("invalid queue number\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;
		/* timer period */
		case 'T':
			timer_period = l2fwd_parse_timer_period(optarg)
				* (int64_t)(1000 * TIMER_MILLISECOND);
			if (timer_period < 0) {
				printf("invalid timer period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;
		/* keepalive check period */
		case 'K':
			check_period = l2fwd_parse_check_period(optarg);
			if (check_period < 0) {
				printf("invalid check period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			l2fwd_usage(prgname);
			return -1;
		default:
			l2fwd_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static void
dead_core(__rte_unused void *ptr_data, const int id_core)
{
	if (terminate_signal_received)
		return;
	printf("Dead core %i - restarting..\n", id_core);
	if (rte_eal_get_lcore_state(id_core) == FINISHED) {
		rte_eal_wait_lcore(id_core);
		rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core);
	} else {
		printf("..false positive!\n");
	}
}

static void
relay_core_state(void *ptr_data, const int id_core,
	const enum rte_keepalive_state core_state, uint64_t last_alive)
{
	rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
		id_core, core_state, last_alive);
}
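
/*
 * relay_core_state() writes each core's keepalive state into the POSIX
 * shared-memory region created by rte_keepalive_shm_create(), so a process
 * outside this application can monitor core liveness as well.
 */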
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	int ret;
	uint8_t nb_ports;
	uint8_t nb_ports_available;
	uint8_t portid, last_port;
	unsigned lcore_id, rx_lcore_id;
	unsigned nb_ports_in_mask = 0;
	struct sigaction signal_handler;
	struct rte_keepalive_shm *ka_shm;

	memset(&signal_handler, 0, sizeof(signal_handler));
	terminate_signal_received = 0;
	signal_handler.sa_handler = &handle_sigterm;
	if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
			sigaction(SIGTERM, &signal_handler, NULL) == -1)
		rte_exit(EXIT_FAILURE, "SIGNAL\n");
	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	l2fwd_enabled_port_mask = 0;

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");

	/* create the mbuf pool */
	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (l2fwd_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
	/* reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;
	last_port = 0;

	/*
	 * Each logical core is assigned a dedicated TX queue on each port.
	 */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			l2fwd_dst_ports[portid] = last_port;
			l2fwd_dst_ports[last_port] = portid;
		} else
			last_port = portid;

		nb_ports_in_mask++;

		rte_eth_dev_info_get(portid, &dev_info);
	}
	if (nb_ports_in_mask % 2) {
		printf("Notice: odd number of ports in portmask.\n");
		l2fwd_dst_ports[last_port] = last_port;
	}
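
	/*
	 * At this point enabled ports are paired (0<->1, 2<->3, ...); with an
	 * odd number of enabled ports the last one forwards back to itself.
	 */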
	rx_lcore_id = 0;
	qconf = NULL;

	/* Initialize the port/queue configuration of each logical core */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
		       l2fwd_rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
		}

		if (qconf != &lcore_queue_conf[rx_lcore_id])
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->n_rx_port] = portid;
		qconf->n_rx_port++;
		printf("Lcore %u: RX port %u\n",
			rx_lcore_id, (unsigned) portid);
	}

	nb_ports_available = nb_ports;
	/* Initialise each port */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %u\n",
				(unsigned) portid);
			nb_ports_available--;
			continue;
		}

		/* init port */
		printf("Initializing port %u... ", (unsigned) portid);
		fflush(stdout);
		ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%u\n",
				ret, (unsigned) portid);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, port=%u\n",
				ret, (unsigned) portid);

		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);

		/* init one RX queue */
		fflush(stdout);
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     NULL,
					     l2fwd_pktmbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup:err=%d, port=%u\n",
				ret, (unsigned) portid);

		/* init one TX queue on each port */
		fflush(stdout);
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
					     rte_eth_dev_socket_id(portid),
					     NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup:err=%d, port=%u\n",
				ret, (unsigned) portid);

		/* Initialize TX buffers */
		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
				rte_eth_dev_socket_id(portid));
		if (tx_buffer[portid] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
				(unsigned) portid);

		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

		/* Count packets the buffer fails to send as drops */
		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
				rte_eth_tx_buffer_count_callback,
				&port_statistics[portid].dropped);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot set error callback for "
				"tx buffer on port %u\n", (unsigned) portid);

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start:err=%d, port=%u\n",
				ret, (unsigned) portid);

		rte_eth_promiscuous_enable(portid);

		printf("Port %u, MAC address: "
			"%02X:%02X:%02X:%02X:%02X:%02X\n\n",
			(unsigned) portid,
			l2fwd_ports_eth_addr[portid].addr_bytes[0],
			l2fwd_ports_eth_addr[portid].addr_bytes[1],
			l2fwd_ports_eth_addr[portid].addr_bytes[2],
			l2fwd_ports_eth_addr[portid].addr_bytes[3],
			l2fwd_ports_eth_addr[portid].addr_bytes[4],
			l2fwd_ports_eth_addr[portid].addr_bytes[5]);

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));
	}
	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
	struct rte_timer hb_timer, stats_timer;

	rte_timer_subsystem_init();
	rte_timer_init(&stats_timer);

	ka_shm = NULL;
	if (check_period > 0) {
		ka_shm = rte_keepalive_shm_create();
		if (ka_shm == NULL)
			rte_exit(EXIT_FAILURE,
				"rte_keepalive_shm_create() failed");
		rte_global_keepalive_info =
			rte_keepalive_create(&dead_core, ka_shm);
		if (rte_global_keepalive_info == NULL)
			rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
		rte_keepalive_register_relay_callback(rte_global_keepalive_info,
			relay_core_state, ka_shm);
		rte_timer_init(&hb_timer);
		if (rte_timer_reset(&hb_timer,
				(check_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				(void(*)(struct rte_timer*, void*))
				&rte_keepalive_dispatch_pings,
				rte_global_keepalive_info
				) != 0)
			rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
	}
	if (timer_period > 0) {
		if (rte_timer_reset(&stats_timer,
				(timer_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				&print_stats, NULL
				) != 0)
			rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
	}
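
	/*
	 * Both timers run on the master lcore: hb_timer drives
	 * rte_keepalive_dispatch_pings() every check_period ms and
	 * stats_timer drives print_stats() every timer_period ms. They only
	 * expire because the main loop below calls rte_timer_manage().
	 */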
	/* launch per-lcore init on every slave lcore */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];

		if (qconf->n_rx_port == 0)
			RTE_LOG(INFO, L2FWD,
				"lcore %u has nothing to do\n",
				lcore_id);
		else {
			rte_eal_remote_launch(
				l2fwd_launch_one_lcore,
				NULL,
				lcore_id);
			rte_keepalive_register_core(rte_global_keepalive_info,
				lcore_id);
		}
	}

	/* Master lcore: service the keepalive and stats timers */
	while (!terminate_signal_received) {
		rte_timer_manage();
		rte_delay_ms(5);
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	if (ka_shm != NULL)
		rte_keepalive_shm_cleanup(ka_shm);
	return 0;
}