1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
12 #include <sys/types.h>
13 #include <sys/queue.h>
14 #include <netinet/in.h>
21 #include <rte_common.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
26 #include <rte_launch.h>
27 #include <rte_atomic.h>
28 #include <rte_spinlock.h>
29 #include <rte_cycles.h>
30 #include <rte_prefetch.h>
31 #include <rte_lcore.h>
32 #include <rte_per_lcore.h>
33 #include <rte_branch_prediction.h>
34 #include <rte_interrupts.h>
35 #include <rte_random.h>
36 #include <rte_debug.h>
37 #include <rte_ether.h>
38 #include <rte_ethdev.h>
40 #include <rte_mempool.h>
42 #include <rte_malloc.h>
46 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
47 #define MBUF_NAME "mbuf_pool_%d"
48 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
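/* Each port gets its own mbuf pool named "mbuf_pool_<portid>", so a re-created
 * slave process can re-attach to the pool later via rte_mempool_lookup(). */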
50 #define RING_MASTER_NAME "l2fwd_ring_m2s_"
51 #define RING_SLAVE_NAME "l2fwd_ring_s2m_"
52 #define MAX_NAME_LEN 32
53 /* RECREATE flag indicates the slave needs to reinitialize its resources and be launched again */
54 #define SLAVE_RECREATE_FLAG 0x1
55 /* RESTART flag indicates the slave's ports need to be restarted and the START command sent again */
56 #define SLAVE_RESTART_FLAG 0x2
57 #define INVALID_MAPPING_ID ((unsigned)LCORE_ID_ANY)
58 /* Maximum number of message buffers per slave */
59 #define NB_CORE_MSGBUF 32
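/* Each master/slave pair communicates over two rings sized NB_CORE_MSGBUF:
 * ring[0] carries commands from the master to the slave, ring[1] carries
 * acknowledgements from the slave back to the master. */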
65 #define MAX_PKT_BURST 32
66 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
69 * Configurable number of RX/TX ring descriptors
71 #define RTE_TEST_RX_DESC_DEFAULT 1024
72 #define RTE_TEST_TX_DESC_DEFAULT 1024
73 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
74 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
76 /* ethernet addresses of ports */
77 static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
79 /* mask of enabled ports */
80 static uint32_t l2fwd_enabled_port_mask = 0;
82 /* list of enabled ports */
83 static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
85 static unsigned int l2fwd_rx_queue_per_lcore = 1;
89 struct rte_mbuf *m_table[MAX_PKT_BURST];
92 #define MAX_RX_QUEUE_PER_LCORE 16
93 #define MAX_TX_QUEUE_PER_PORT 16
94 struct lcore_queue_conf {
95 unsigned n_rx_port;
96 unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
97 } __rte_cache_aligned;
98 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
100 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
102 struct lcore_resource_struct {
103 int enabled; /* Set only if this lcore is involved in packet forwarding */
104 int flags; /* Set when the slave needs to restart or be re-created */
105 unsigned lcore_id; /* lcore ID */
106 unsigned pair_id; /* lcore ID of the paired lcore (port dependency) */
107 char ring_name[2][MAX_NAME_LEN];
108 /* ring[0] for master send cmd, slave read */
109 /* ring[1] for slave send ack, master read */
110 struct rte_ring *ring[2];
111 int port_num; /* Total number of ports handled by this lcore */
112 /* Port IDs on which this lcore receives packets */
113 uint16_t port[RTE_MAX_ETHPORTS];
114 } __attribute__((packed)) __rte_cache_aligned;
116 static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
117 static struct rte_mempool *message_pool;
118 static rte_spinlock_t res_lock = RTE_SPINLOCK_INITIALIZER;
119 /* use floating processes */
120 static int float_proc = 0;
121 /* Save original cpu affinity */
127 static const struct rte_eth_conf port_conf = {
130 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
133 .mq_mode = ETH_MQ_TX_NONE,
137 static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS];
139 /* Per-port statistics struct */
140 struct l2fwd_port_statistics {
144 } __rte_cache_aligned;
145 struct l2fwd_port_statistics *port_statistics;
147 * Pointer to the lcore ID mapping array, used to return a lcore ID when a slave
148 * process exits unexpectedly; used only when the floating process option is applied.
150 unsigned *mapping_id;
152 /* A tsc-based timer responsible for triggering statistics printout */
153 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
154 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
155 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
157 static int l2fwd_launch_one_lcore(void *dummy);
159 /* Print out statistics on packets dropped */
163 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
166 total_packets_dropped = 0;
167 total_packets_tx = 0;
168 total_packets_rx = 0;
170 const char clr[] = { 27, '[', '2', 'J', '\0' };
171 const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
173 /* Clear screen and move to top left */
174 printf("%s%s", clr, topLeft);
176 printf("\nPort statistics ====================================");
178 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
179 /* skip disabled ports */
180 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
182 printf("\nStatistics for port %u ------------------------------"
183 "\nPackets sent: %24"PRIu64
184 "\nPackets received: %20"PRIu64
185 "\nPackets dropped: %21"PRIu64,
187 port_statistics[portid].tx,
188 port_statistics[portid].rx,
189 port_statistics[portid].dropped);
191 total_packets_dropped += port_statistics[portid].dropped;
192 total_packets_tx += port_statistics[portid].tx;
193 total_packets_rx += port_statistics[portid].rx;
195 printf("\nAggregate statistics ==============================="
196 "\nTotal packets sent: %18"PRIu64
197 "\nTotal packets received: %14"PRIu64
198 "\nTotal packets dropped: %15"PRIu64,
201 total_packets_dropped);
202 printf("\n====================================================\n");
206 clear_cpu_affinity(void)
210 s = sched_setaffinity(0, cpu_aff.size, &cpu_aff.set);
212 printf("sched_setaffinity failed:%s\n", strerror(errno));
220 get_cpu_affinity(void)
224 cpu_aff.size = sizeof(cpu_set_t);
225 CPU_ZERO(&cpu_aff.set);
227 s = sched_getaffinity(0, cpu_aff.size, &cpu_aff.set);
229 printf("sched_getaffinity failed:%s\n", strerror(errno));
237 * This function demonstrates the approach used to create a ring in the first
238 * instance, or to re-attach to an existing ring in a later instance.
240 static struct rte_ring *
241 create_ring(const char *name, unsigned count,
242 int socket_id, unsigned flags)
244 struct rte_ring *ring;
249 /* If it was already created, just attach to it */
250 if (likely((ring = rte_ring_lookup(name)) != NULL))
253 /* First call: create a new one */
254 return rte_ring_create(name, count, socket_id, flags);
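/* Typical usage: the first caller creates the ring, while a re-spawned slave
 * simply finds the same ring again by name through rte_ring_lookup(), so no
 * state is lost across a slave restart. */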
257 /* Allocate with rte_malloc the structures that are shared by master and slaves */
259 l2fwd_malloc_shared_struct(void)
261 port_statistics = rte_zmalloc("port_stat",
262 sizeof(struct l2fwd_port_statistics) * RTE_MAX_ETHPORTS,
264 if (port_statistics == NULL)
267 /* allocate mapping_id array */
270 mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE,
273 if (mapping_id == NULL)
276 for (i = 0 ;i < RTE_MAX_LCORE; i++)
277 mapping_id[i] = INVALID_MAPPING_ID;
282 /* Create the rings used for communication between master and slave */
284 create_ms_ring(unsigned slaveid)
286 unsigned flag = RING_F_SP_ENQ | RING_F_SC_DEQ;
287 struct lcore_resource_struct *res = &lcore_resource[slaveid];
288 unsigned socketid = rte_socket_id();
290 /* Always create the rings on the master's socket */
291 /* By default each ring only holds NB_CORE_MSGBUF (32) entries */
292 snprintf(res->ring_name[0], MAX_NAME_LEN, "%s%u",
293 RING_MASTER_NAME, slaveid);
294 if ((res->ring[0] = create_ring(res->ring_name[0], NB_CORE_MSGBUF,
295 socketid, flag)) == NULL) {
296 printf("Create m2s ring %s failed\n", res->ring_name[0]);
300 snprintf(res->ring_name[1], MAX_NAME_LEN, "%s%u",
301 RING_SLAVE_NAME, slaveid);
302 if ((res->ring[1] = create_ring(res->ring_name[1], NB_CORE_MSGBUF,
303 socketid, flag)) == NULL) {
304 printf("Create s2m ring %s failed\n", res->ring_name[1]);
311 /* Send a command to the peer over the paired master/slave ring */
313 sendcmd(unsigned slaveid, enum l2fwd_cmd cmd, int is_master)
315 struct lcore_resource_struct *res = &lcore_resource[slaveid];
319 /* Only check on the master side; a slave must already be enabled and running */
320 if (is_master && !res->enabled)
323 if (res->ring[fd] == NULL)
326 if (rte_mempool_get(message_pool, &msg) < 0) {
327 printf("Error to get message buffer\n");
331 *(enum l2fwd_cmd *)msg = cmd;
333 if (rte_ring_enqueue(res->ring[fd], msg) != 0) {
334 printf("Enqueue error\n");
335 rte_mempool_put(message_pool, msg);
342 /* Get a command from the peer over the paired master/slave ring */
344 getcmd(unsigned slaveid, enum l2fwd_cmd *cmd, int is_master)
346 struct lcore_resource_struct *res = &lcore_resource[slaveid];
348 int fd = !!is_master;
350 /* Only check on the master side; a slave must already be enabled and running */
351 if (is_master && (!res->enabled))
354 if (res->ring[fd] == NULL)
357 ret = rte_ring_dequeue(res->ring[fd], &msg);
360 *cmd = *(enum l2fwd_cmd *)msg;
361 rte_mempool_put(message_pool, msg);
366 /* Master sends a command to a slave and waits until an ack is received or an error occurs */
368 master_sendcmd_with_ack(unsigned slaveid, enum l2fwd_cmd cmd)
370 enum l2fwd_cmd ack_cmd;
373 if (sendcmd(slaveid, cmd, 1) != 0)
374 rte_exit(EXIT_FAILURE, "Failed to send message\n");
378 ret = getcmd(slaveid, &ack_cmd, 1);
379 if (ret == 0 && cmd == ack_cmd)
382 /* If the slave is not running yet, return an error */
383 if (flib_query_slave_status(slaveid) != ST_RUN) {
392 /* Restart all ports assigned to the given slave lcore */
394 reset_slave_all_ports(unsigned slaveid)
396 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
399 /* stop/start port */
400 for (i = 0; i < slave->port_num; i++) {
401 char buf_name[RTE_MEMPOOL_NAMESIZE];
402 struct rte_mempool *pool;
403 printf("Stop port :%d\n", slave->port[i]);
404 rte_eth_dev_stop(slave->port[i]);
405 snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, slave->port[i]);
406 pool = rte_mempool_lookup(buf_name);
408 printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
409 rte_mempool_avail_count(pool),
410 (unsigned int)NB_MBUF);
412 printf("Can't find mempool %s\n", buf_name);
414 printf("Start port :%d\n", slave->port[i]);
415 ret = rte_eth_dev_start(slave->port[i]);
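/* Reset the structures shared between master and the given slave;
 * at present the ports are the only such shared resource. */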
423 reset_shared_structures(unsigned slaveid)
426 /* Ports are the only shared resource here */
427 ret = reset_slave_all_ports(slaveid);
433 * Call this function to re-create the resources needed by a slave process that
434 * exited in the previous instance.
437 init_slave_res(unsigned slaveid)
439 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
442 if (!slave->enabled) {
443 printf("Something wrong with lcore=%u enabled=%d\n",slaveid,
448 /* Initialize ring */
449 if (create_ms_ring(slaveid) != 0)
450 rte_exit(EXIT_FAILURE, "failed to create ring for slave %u\n",
453 /* Drain any unread messages left in the rings */
454 while (getcmd(slaveid, &cmd, 1) == 0);
455 while (getcmd(slaveid, &cmd, 0) == 0);
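/* Re-create the resources of an exited slave and launch its forwarding loop again */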
461 recreate_one_slave(unsigned slaveid)
464 /* Re-initialize resources for the stalled slave */
465 if ((ret = init_slave_res(slaveid)) != 0) {
466 printf("Init slave=%u failed\n", slaveid);
470 if ((ret = flib_remote_launch(l2fwd_launch_one_lcore, NULL, slaveid))
472 printf("Launch slave %u failed\n", slaveid);
478 * Remap the resources belonging to slave_id to the new lcore ID obtained from flib_assign_lcore_id();
479 * used only when the floating process option is applied.
482 * the original lcore_id that is being remapped
485 remapping_slave_resource(unsigned slaveid, unsigned map_id)
488 /* remapping lcore_resource */
489 memcpy(&lcore_resource[map_id], &lcore_resource[slaveid],
490 sizeof(struct lcore_resource_struct));
492 /* remapping lcore_queue_conf */
493 memcpy(&lcore_queue_conf[map_id], &lcore_queue_conf[slaveid],
494 sizeof(struct lcore_queue_conf));
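/* Reset both lcores of a port pair: reset the shared port resources, return any
 * floating-process mapping IDs, then re-create and relaunch both slaves. */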
498 reset_pair(unsigned slaveid, unsigned pairid)
501 if ((ret = reset_shared_structures(slaveid)) != 0)
504 if((ret = reset_shared_structures(pairid)) != 0)
508 unsigned map_id = mapping_id[slaveid];
510 if (map_id != INVALID_MAPPING_ID) {
511 printf("%u return mapping id %u\n", slaveid, map_id);
512 flib_free_lcore_id(map_id);
513 mapping_id[slaveid] = INVALID_MAPPING_ID;
516 map_id = mapping_id[pairid];
517 if (map_id != INVALID_MAPPING_ID) {
518 printf("%u return mapping id %u\n", pairid, map_id);
519 flib_free_lcore_id(map_id);
520 mapping_id[pairid] = INVALID_MAPPING_ID;
524 if((ret = recreate_one_slave(slaveid)) != 0)
527 ret = recreate_one_slave(pairid);
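/* Callback run in the master when a slave process exits: mark the slave so the
 * master's main loop will re-create its resources and restart it. */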
534 slave_exit_cb(unsigned slaveid, __attribute__((unused))int stat)
536 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
538 printf("Get slave %u leave info\n", slaveid);
539 if (!slave->enabled) {
540 printf("Lcore=%u not registered for it's exit\n", slaveid);
543 rte_spinlock_lock(&res_lock);
545 /* Change the state and wait for the master to restart it */
546 slave->flags = SLAVE_RECREATE_FLAG;
548 rte_spinlock_unlock(&res_lock);
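/* Forward one packet to its destination port, rewriting the destination and
 * source Ethernet addresses before buffering it for transmission. */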
552 l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
554 struct ether_hdr *eth;
558 struct rte_eth_dev_tx_buffer *buffer;
560 dst_port = l2fwd_dst_ports[portid];
561 eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
563 /* 02:00:00:00:00:xx */
564 tmp = ð->d_addr.addr_bytes[0];
565 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
568 ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr);
570 buffer = tx_buffer[dst_port];
571 sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
573 port_statistics[dst_port].tx += sent;
576 /* main processing loop */
578 l2fwd_main_loop(void)
580 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
584 uint64_t prev_tsc, diff_tsc, cur_tsc;
585 unsigned i, j, portid, nb_rx;
586 struct lcore_queue_conf *qconf;
587 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
589 struct rte_eth_dev_tx_buffer *buffer;
593 lcore_id = rte_lcore_id();
595 qconf = &lcore_queue_conf[lcore_id];
597 if (qconf->n_rx_port == 0) {
598 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
602 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
604 for (i = 0; i < qconf->n_rx_port; i++) {
605 portid = qconf->rx_port_list[i];
606 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
612 cur_tsc = rte_rdtsc();
614 if (unlikely(getcmd(lcore_id, &cmd, 0) == 0)) {
615 sendcmd(lcore_id, cmd, 0);
617 /* If a stop command is received, stop forwarding and exit */
618 if (cmd == CMD_STOP) {
624 * TX burst queue drain
626 diff_tsc = cur_tsc - prev_tsc;
627 if (unlikely(diff_tsc > drain_tsc)) {
629 for (i = 0; i < qconf->n_rx_port; i++) {
631 portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
632 buffer = tx_buffer[portid];
634 sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
636 port_statistics[portid].tx += sent;
644 * Read packets from RX queues
646 for (i = 0; i < qconf->n_rx_port; i++) {
648 portid = qconf->rx_port_list[i];
649 nb_rx = rte_eth_rx_burst((uint16_t) portid, 0,
650 pkts_burst, MAX_PKT_BURST);
652 port_statistics[portid].rx += nb_rx;
654 for (j = 0; j < nb_rx; j++) {
655 m = pkts_burst[j];
656 rte_prefetch0(rte_pktmbuf_mtod(m, void *));
657 l2fwd_simple_forward(m, portid);
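/* Slave entry point launched on every worker lcore: when the floating process
 * option is used it drops the core affinity, obtains a fresh lcore ID and remaps
 * the slave's resources to it before entering the forwarding loop. */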
664 l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
666 unsigned lcore_id = rte_lcore_id();
671 /* Change it to a floating process and also change its lcore_id */
672 clear_cpu_affinity();
673 RTE_PER_LCORE(_lcore_id) = 0;
675 if (flib_assign_lcore_id() < 0 ) {
676 printf("flib_assign_lcore_id failed\n");
679 flcore_id = rte_lcore_id();
680 /* Record the mapping ID so the master can release it after the slave exits */
681 mapping_id[lcore_id] = flcore_id;
682 printf("Org lcore_id = %u, cur lcore_id = %u\n",
683 lcore_id, flcore_id);
684 remapping_slave_resource(lcore_id, flcore_id);
689 /* Release the floating lcore_id before returning */
691 flib_free_lcore_id(rte_lcore_id());
692 mapping_id[lcore_id] = INVALID_MAPPING_ID;
699 l2fwd_usage(const char *prgname)
701 printf("%s [EAL options] -- -p PORTMASK -s COREMASK [-q NQ] -f\n"
702 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
703 " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
704 " -f use floating process which won't bind to any core to run\n"
705 " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
710 l2fwd_parse_portmask(const char *portmask)
715 /* parse hexadecimal string */
716 pm = strtoul(portmask, &end, 16);
717 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
727 l2fwd_parse_nqueue(const char *q_arg)
732 /* parse decimal string */
733 n = strtoul(q_arg, &end, 10);
734 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
738 if (n >= MAX_RX_QUEUE_PER_LCORE)
745 l2fwd_parse_timer_period(const char *q_arg)
750 /* parse number string */
751 n = strtol(q_arg, &end, 10);
752 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
754 if (n >= MAX_TIMER_PERIOD)
760 /* Parse the arguments given on the command line of the application */
762 l2fwd_parse_args(int argc, char **argv)
767 char *prgname = argv[0];
768 static struct option lgopts[] = {
775 while ((opt = getopt_long(argc, argvopt, "p:q:T:f",
776 lgopts, &option_index)) != EOF) {
781 l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
782 if (l2fwd_enabled_port_mask == 0) {
783 printf("invalid portmask\n");
784 l2fwd_usage(prgname);
792 l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
793 if (l2fwd_rx_queue_per_lcore == 0) {
794 printf("invalid queue number\n");
795 l2fwd_usage(prgname);
802 timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
803 if (timer_period < 0) {
804 printf("invalid timer period\n");
805 l2fwd_usage(prgname);
810 /* use floating process */
817 l2fwd_usage(prgname);
821 l2fwd_usage(prgname);
827 argv[optind-1] = prgname;
830 l2fwd_usage(prgname);
834 optind = 1; /* reset getopt lib */
838 /* Check the link status of all ports in up to 9 s, and print the final status */
840 check_all_ports_link_status(uint32_t port_mask)
842 #define CHECK_INTERVAL 100 /* 100ms */
843 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
845 uint8_t count, all_ports_up, print_flag = 0;
846 struct rte_eth_link link;
848 printf("\nChecking link status");
850 for (count = 0; count <= MAX_CHECK_TIME; count++) {
852 RTE_ETH_FOREACH_DEV(portid) {
853 if ((port_mask & (1 << portid)) == 0)
855 memset(&link, 0, sizeof(link));
856 rte_eth_link_get_nowait(portid, &link);
857 /* print link status if flag set */
858 if (print_flag == 1) {
859 if (link.link_status)
861 "Port%d Link Up- speed %u Mbps- %s\n",
862 portid, link.link_speed,
863 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
864 ("full-duplex") : ("half-duplex\n"));
866 printf("Port %d Link Down\n", portid);
869 /* clear all_ports_up flag if any link down */
870 if (link.link_status == ETH_LINK_DOWN) {
875 /* after finally printing all link status, get out */
879 if (all_ports_up == 0) {
882 rte_delay_ms(CHECK_INTERVAL);
885 /* set the print_flag if all ports up or timeout */
886 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
894 main(int argc, char **argv)
896 struct lcore_queue_conf *qconf;
899 uint16_t nb_ports_available = 0;
900 uint16_t portid, last_port;
901 unsigned rx_lcore_id;
902 unsigned nb_ports_in_mask = 0;
904 uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
906 /* Save the CPU affinity first; it is restored later when the floating process option is used */
907 if (get_cpu_affinity() != 0)
908 rte_exit(EXIT_FAILURE, "get_cpu_affinity error\n");
910 /* Also try to set the CPU affinity now, to detect early whether it would fail in a child process */
911 if(clear_cpu_affinity() != 0)
912 rte_exit(EXIT_FAILURE, "clear_cpu_affinity error\n");
915 ret = rte_eal_init(argc, argv);
917 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
921 /* parse application arguments (after the EAL ones) */
922 ret = l2fwd_parse_args(argc, argv);
924 rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
927 if (flib_init() != 0)
928 rte_exit(EXIT_FAILURE, "flib init error");
931 * Allocate the structures that slave lcores will modify. Structures that slaves
932 * only read do not need to be shared through rte_malloc; global or static variables
933 * are fine, since each slave inherits everything the master initialized.
935 if (l2fwd_malloc_shared_struct() != 0)
936 rte_exit(EXIT_FAILURE, "malloc mem failed\n");
938 /* Initialize lcore_resource structures */
939 memset(lcore_resource, 0, sizeof(lcore_resource));
940 for (i = 0; i < RTE_MAX_LCORE; i++)
941 lcore_resource[i].lcore_id = i;
943 nb_ports = rte_eth_dev_count_avail();
945 rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
947 /* create the mbuf pool */
948 RTE_ETH_FOREACH_DEV(portid) {
949 /* skip ports that are not enabled */
950 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
952 char buf_name[RTE_MEMPOOL_NAMESIZE];
953 snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
954 l2fwd_pktmbuf_pool[portid] =
955 rte_pktmbuf_pool_create(buf_name, NB_MBUF, 32,
956 0, MBUF_DATA_SIZE, rte_socket_id());
957 if (l2fwd_pktmbuf_pool[portid] == NULL)
958 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
960 printf("Create mbuf %s\n", buf_name);
963 /* reset l2fwd_dst_ports */
964 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
965 l2fwd_dst_ports[portid] = 0;
969 * Each logical core is assigned a dedicated TX queue on each port.
971 RTE_ETH_FOREACH_DEV(portid) {
972 /* skip ports that are not enabled */
973 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
976 if (nb_ports_in_mask % 2) {
977 l2fwd_dst_ports[portid] = last_port;
978 l2fwd_dst_ports[last_port] = portid;
985 if (nb_ports_in_mask % 2) {
986 printf("Notice: odd number of ports in portmask.\n");
987 l2fwd_dst_ports[last_port] = last_port;
993 /* Initialize the port/queue configuration of each logical core */
994 RTE_ETH_FOREACH_DEV(portid) {
995 struct lcore_resource_struct *res;
996 /* skip ports that are not enabled */
997 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1000 /* get the lcore_id for this port */
1001 /* skip master lcore */
1002 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
1003 rte_get_master_lcore() == rx_lcore_id ||
1004 lcore_queue_conf[rx_lcore_id].n_rx_port ==
1005 l2fwd_rx_queue_per_lcore) {
1008 if (rx_lcore_id >= RTE_MAX_LCORE)
1009 rte_exit(EXIT_FAILURE, "Not enough cores\n");
1012 if (qconf != &lcore_queue_conf[rx_lcore_id])
1013 /* Assigned a new logical core in the loop above. */
1014 qconf = &lcore_queue_conf[rx_lcore_id];
1016 qconf->rx_port_list[qconf->n_rx_port] = portid;
1019 /* Save the port resource info into the lcore_resource structures */
1020 res = &lcore_resource[rx_lcore_id];
1022 res->port[res->port_num++] = portid;
1024 printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
1027 /* Initialise each port */
1028 RTE_ETH_FOREACH_DEV(portid) {
1029 struct rte_eth_rxconf rxq_conf;
1030 struct rte_eth_txconf txq_conf;
1031 struct rte_eth_conf local_port_conf = port_conf;
1033 /* skip ports that are not enabled */
1034 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
1035 printf("Skipping disabled port %u\n", (unsigned) portid);
1038 nb_ports_available++;
1040 printf("Initializing port %u... ", (unsigned) portid);
1042 rte_eth_dev_info_get(portid, &dev_info);
1043 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1044 local_port_conf.txmode.offloads |=
1045 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1046 ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
1048 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
1049 ret, (unsigned) portid);
1051 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1054 rte_exit(EXIT_FAILURE,
1055 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
1056 ret, (unsigned) portid);
1058 rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
1060 /* init one RX queue */
1062 rxq_conf = dev_info.default_rxconf;
1063 rxq_conf.offloads = local_port_conf.rxmode.offloads;
1064 ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
1065 rte_eth_dev_socket_id(portid),
1067 l2fwd_pktmbuf_pool[portid]);
1069 rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
1070 ret, (unsigned) portid);
1072 /* init one TX queue on each port */
1074 txq_conf = dev_info.default_txconf;
1075 txq_conf.tx_offloads = local_port_conf.txmode.offloads;
1076 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
1077 rte_eth_dev_socket_id(portid),
1080 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
1081 ret, (unsigned) portid);
1083 /* Initialize TX buffers */
1084 tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
1085 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
1086 rte_eth_dev_socket_id(portid));
1087 if (tx_buffer[portid] == NULL)
1088 rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
1091 rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
1093 ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
1094 rte_eth_tx_buffer_count_callback,
1095 &port_statistics[portid].dropped);
1097 rte_exit(EXIT_FAILURE, "Cannot set error callback for "
1098 "tx buffer on port %u\n", (unsigned) portid);
1101 ret = rte_eth_dev_start(portid);
1103 rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
1104 ret, (unsigned) portid);
1108 rte_eth_promiscuous_enable(portid);
1110 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
1112 l2fwd_ports_eth_addr[portid].addr_bytes[0],
1113 l2fwd_ports_eth_addr[portid].addr_bytes[1],
1114 l2fwd_ports_eth_addr[portid].addr_bytes[2],
1115 l2fwd_ports_eth_addr[portid].addr_bytes[3],
1116 l2fwd_ports_eth_addr[portid].addr_bytes[4],
1117 l2fwd_ports_eth_addr[portid].addr_bytes[5]);
1119 /* port statistics live in shared memory and are already zeroed by rte_zmalloc() */
1120 //memset(&port_statistics, 0, sizeof(port_statistics));
1123 if (!nb_ports_available) {
1124 rte_exit(EXIT_FAILURE,
1125 "All available ports are disabled. Please set portmask.\n");
1128 check_all_ports_link_status(l2fwd_enabled_port_mask);
1130 /* Record pair lcore */
1132 * Since the l2fwd example pairs neighbouring ports (port 0 receives and forwards to
1133 * port 1, and port 1 does the same towards port 0), the two ports depend on each
1134 * other. If one port stops working (its slave is killed, for example), the port has to
1135 * be stopped and started again, and meanwhile the paired port must wait until the
1136 * stop/start procedure completes. So record the pair relationship for the lcores working
1139 RTE_ETH_FOREACH_DEV(portid) {
1141 unsigned lcore = 0, pair_lcore = 0;
1142 unsigned j, find_lcore, find_pair_lcore;
1143 /* skip ports that are not enabled */
1144 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1147 /* Find pair ports' lcores */
1148 find_lcore = find_pair_lcore = 0;
1149 pair_port = l2fwd_dst_ports[portid];
1150 for (i = 0; i < RTE_MAX_LCORE; i++) {
1151 if (!rte_lcore_is_enabled(i))
1153 for (j = 0; j < lcore_queue_conf[i].n_rx_port;j++) {
1154 if (lcore_queue_conf[i].rx_port_list[j] == portid) {
1159 if (lcore_queue_conf[i].rx_port_list[j] == pair_port) {
1161 find_pair_lcore = 1;
1165 if (find_lcore && find_pair_lcore)
1168 if (!find_lcore || !find_pair_lcore)
1169 rte_exit(EXIT_FAILURE, "Not find port=%d pair\n", portid);
1171 printf("lcore %u and %u paired\n", lcore, pair_lcore);
1172 lcore_resource[lcore].pair_id = pair_lcore;
1173 lcore_resource[pair_lcore].pair_id = lcore;
1176 /* Create the message buffer pool shared by the master and all slaves */
1177 message_pool = rte_mempool_create("ms_msg_pool",
1178 NB_CORE_MSGBUF * RTE_MAX_LCORE,
1179 sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
1180 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
1182 if (message_pool == NULL)
1183 rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
1185 /* Create a ring for each master/slave pair, and register a callback for when the slave exits */
1186 for (i = 0; i < RTE_MAX_LCORE; i++) {
1188 * Only create the ring and register the slave_exit callback if the core is involved in
1191 if (lcore_resource[i].enabled) {
1192 /* Create ring for master and slave communication */
1193 ret = create_ms_ring(i);
1195 rte_exit(EXIT_FAILURE, "Create ring for lcore=%u failed",
1198 if (flib_register_slave_exit_notify(i,
1199 slave_exit_cb) != 0)
1200 rte_exit(EXIT_FAILURE,
1201 "Register master_trace_slave_exit failed");
1205 /* launch per-lcore init on every lcore except master */
1206 flib_mp_remote_launch(l2fwd_launch_one_lcore, NULL, SKIP_MASTER);
1208 /* print statistics periodically (every 10 seconds by default) */
1209 prev_tsc = cur_tsc = rte_rdtsc();
1213 cur_tsc = rte_rdtsc();
1214 diff_tsc = cur_tsc - prev_tsc;
1215 /* if timer is enabled */
1216 if (timer_period > 0) {
1218 /* advance the timer */
1219 timer_tsc += diff_tsc;
1221 /* if timer has reached its timeout */
1222 if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
1225 /* reset the timer */
1232 /* Check whether any slave needs to be restarted or re-created */
1233 rte_spinlock_lock(&res_lock);
1234 for (i = 0; i < RTE_MAX_LCORE; i++) {
1235 struct lcore_resource_struct *res = &lcore_resource[i];
1236 struct lcore_resource_struct *pair = &lcore_resource[res->pair_id];
1238 /* If a slave has exited, try to reset the pair */
1239 if (res->enabled && res->flags && pair->enabled) {
1241 master_sendcmd_with_ack(pair->lcore_id, CMD_STOP);
1242 rte_spinlock_unlock(&res_lock);
1244 rte_spinlock_lock(&res_lock);
1248 if (reset_pair(res->lcore_id, pair->lcore_id) != 0)
1249 rte_exit(EXIT_FAILURE, "failed to reset slave");
1254 rte_spinlock_unlock(&res_lock);