4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
42 #include <sys/queue.h>
43 #include <netinet/in.h>
50 #include <rte_common.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_tailq.h>
57 #include <rte_per_lcore.h>
58 #include <rte_launch.h>
59 #include <rte_atomic.h>
60 #include <rte_spinlock.h>
61 #include <rte_cycles.h>
62 #include <rte_prefetch.h>
63 #include <rte_lcore.h>
64 #include <rte_per_lcore.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_interrupts.h>
68 #include <rte_random.h>
69 #include <rte_debug.h>
70 #include <rte_ether.h>
71 #include <rte_ethdev.h>
73 #include <rte_mempool.h>
75 #include <rte_malloc.h>
/* Application-wide compile-time configuration.
 * NOTE(review): this file appears to be a partial/garbled dump (stray
 * leading numbers, missing lines); macros are documented as visible. */
80 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
/* Per-port mbuf pool name template; %d is filled with the port id. */
81 #define MBUF_NAME "mbuf_pool_%d"
82 #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/* Name prefixes for the master->slave and slave->master command rings. */
84 #define RING_MASTER_NAME "l2fwd_ring_m2s_"
85 #define RING_SLAVE_NAME "l2fwd_ring_s2m_"
86 #define MAX_NAME_LEN 32
/* RECREATE flag: the slave needs its resources re-initialized and slave_core launched again */
88 #define SLAVE_RECREATE_FLAG 0x1
/* RESTART flag: the slave's ports need a restart and the START command must be re-sent */
90 #define SLAVE_RESTART_FLAG 0x2
91 #define INVALID_MAPPING_ID ((unsigned)LCORE_ID_ANY)
/* Maximum message buffers per slave (command ring depth) */
93 #define NB_CORE_MSGBUF 32
99 #define MAX_PKT_BURST 32
100 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configurable number of RX/TX ring descriptors */
105 #define RTE_TEST_RX_DESC_DEFAULT 128
106 #define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
111 static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
/* mask of enabled ports (set from the -p command-line option) */
114 static uint32_t l2fwd_enabled_port_mask = 0;
/* destination-port table: l2fwd_dst_ports[rx_port] = tx_port pairing */
117 static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
119 static unsigned int l2fwd_rx_queue_per_lcore = 1;
/* NOTE(review): enclosing "struct mbuf_table { unsigned len;" line is
 * missing from this dump; m_table is its pending-TX packet array. */
123 struct rte_mbuf *m_table[MAX_PKT_BURST];
126 #define MAX_RX_QUEUE_PER_LCORE 16
127 #define MAX_TX_QUEUE_PER_PORT 16
/* Per-lcore RX-port list plus per-port TX staging buffers. */
128 struct lcore_queue_conf {
130 unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
131 struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
133 } __rte_cache_aligned;
134 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
/* Per-slave bookkeeping shared between master and slave processes:
 * command rings, owned ports, and restart/recreate state. */
136 struct lcore_resource_struct {
int enabled; /* Only set in case this lcore involved into packet forwarding */
int flags; /* Set only slave need to restart or recreate */
unsigned lcore_id; /* lcore ID */
unsigned pair_id; /* dependency lcore ID on port */
141 char ring_name[2][MAX_NAME_LEN];
/* ring[0] for master send cmd, slave read */
/* ring[1] for slave send ack, master read */
144 struct rte_ring *ring[2];
int port_num; /* Total port numbers */
uint8_t port[RTE_MAX_ETHPORTS]; /* Port id for that lcore to receive packets */
147 }__attribute__((packed)) __rte_cache_aligned;
149 static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
/* Mempool backing the tiny command messages exchanged over the rings. */
150 static struct rte_mempool *message_pool;
/* Protects lcore_resource flags between the stats loop and exit callback. */
151 static rte_spinlock_t res_lock = RTE_SPINLOCK_INITIALIZER;
/* use floating processes (slaves not pinned to a core; -f option) */
153 static int float_proc = 0;
/* Save original cpu affinity */
/* Default port configuration: all hardware offloads disabled.
 * NOTE(review): the .rxmode/.txmode sub-struct braces are missing from
 * this dump; only the member initializers survived. */
160 static const struct rte_eth_conf port_conf = {
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
.hw_strip_crc = 0, /**< CRC stripped by hardware */
170 .mq_mode = ETH_MQ_TX_NONE,
/* One mbuf pool per enabled port (created in MAIN). */
174 static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS];
/* Per-port statistics struct (tx/rx/dropped counters; fields missing
 * from this dump). Allocated in shared memory so slaves can update it. */
177 struct l2fwd_port_statistics {
181 } __rte_cache_aligned;
182 struct l2fwd_port_statistics *port_statistics;
/* pointer to lcore ID mapping array, used to return lcore id in case slave
 * process exited unexpectedly; used only when the floating-process option is applied */
187 unsigned *mapping_id;
/* A tsc-based timer responsible for triggering statistics printout */
190 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
191 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
/* Forward declaration: per-slave entry point launched by the master. */
194 static int l2fwd_launch_one_lcore(void *dummy);
/* Print out statistics on packets dropped.
 * Clears the terminal (VT100 escape codes), prints per-port tx/rx/dropped
 * counters for every enabled port, then the aggregate totals.
 * NOTE(review): function signature/braces are missing from this dump. */
200 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
203 total_packets_dropped = 0;
204 total_packets_tx = 0;
205 total_packets_rx = 0;
/* VT100: ESC[2J clears the screen, ESC[1;1H homes the cursor. */
207 const char clr[] = { 27, '[', '2', 'J', '\0' };
208 const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
/* Clear screen and move to top left */
211 printf("%s%s", clr, topLeft);
213 printf("\nPort statistics ====================================");
215 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
/* skip disabled ports */
217 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
219 printf("\nStatistics for port %u ------------------------------"
220 "\nPackets sent: %24"PRIu64
221 "\nPackets received: %20"PRIu64
222 "\nPackets dropped: %21"PRIu64,
224 port_statistics[portid].tx,
225 port_statistics[portid].rx,
226 port_statistics[portid].dropped);
/* accumulate aggregate totals while walking the ports */
228 total_packets_dropped += port_statistics[portid].dropped;
229 total_packets_tx += port_statistics[portid].tx;
230 total_packets_rx += port_statistics[portid].rx;
232 printf("\nAggregate statistics ==============================="
233 "\nTotal packets sent: %18"PRIu64
234 "\nTotal packets received: %14"PRIu64
235 "\nTotal packets dropped: %15"PRIu64,
238 total_packets_dropped);
239 printf("\n====================================================\n");
/* Reset this process's CPU affinity to the mask saved by
 * get_cpu_affinity(), so a forked floating slave is not pinned to the
 * parent's core. Returns non-zero on failure (presumably; the return
 * statements are missing from this dump — TODO confirm). */
243 clear_cpu_affinity(void)
247 s = sched_setaffinity(0, cpu_aff.size, &cpu_aff.set);
/* report the OS error before the caller aborts */
249 printf("sched_setaffinity failed:%s\n", strerror(errno));
/* Save the process's original CPU affinity into the global cpu_aff so it
 * can be restored later by clear_cpu_affinity() (floating-process mode). */
257 get_cpu_affinity(void)
261 cpu_aff.size = sizeof(cpu_set_t);
262 CPU_ZERO(&cpu_aff.set);
264 s = sched_getaffinity(0, cpu_aff.size, &cpu_aff.set);
/* report the OS error before the caller aborts */
266 printf("sched_getaffinity failed:%s\n", strerror(errno));
/*
 * This function demonstrates the approach to create a ring in the first
 * instance or re-attach an existing ring in a later instance.
 */
277 static struct rte_ring *
278 create_ring(const char *name, unsigned count,
279 int socket_id,unsigned flags)
281 struct rte_ring *ring;
/* If already created (e.g. by a previous process instance), just attach it */
287 if (likely((ring = rte_ring_lookup(name)) != NULL))
/* First call: create a fresh ring */
291 return rte_ring_create(name, count, socket_id, flags);
/* Malloc with rte_malloc on structures that are shared by master and slave
 * processes: per-port statistics and the floating-lcore mapping table.
 * rte_malloc memory lives in the DPDK shared hugepage area, so forked
 * slaves see the same objects. */
296 l2fwd_malloc_shared_struct(void)
298 port_statistics = rte_zmalloc("port_stat",
299 sizeof(struct l2fwd_port_statistics) * RTE_MAX_ETHPORTS,
301 if (port_statistics == NULL)
/* allocate mapping_id array */
307 mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE,
310 if (mapping_id == NULL)
/* no slave has a floating lcore id yet */
313 for (i = 0 ;i < RTE_MAX_LCORE; i++)
314 mapping_id[i] = INVALID_MAPPING_ID;
/* Create the pair of rings used to communicate between master and the
 * given slave: ring[0] master->slave commands, ring[1] slave->master acks. */
321 create_ms_ring(unsigned slaveid)
/* single-producer/single-consumer: exactly one writer and reader per ring */
323 unsigned flag = RING_F_SP_ENQ | RING_F_SC_DEQ;
324 struct lcore_resource_struct *res = &lcore_resource[slaveid];
325 unsigned socketid = rte_socket_id();
/* Always assume create ring on master socket_id */
/* Default only create a ring size 32 */
329 snprintf(res->ring_name[0], MAX_NAME_LEN, "%s%u",
330 RING_MASTER_NAME, slaveid);
331 if ((res->ring[0] = create_ring(res->ring_name[0], NB_CORE_MSGBUF,
332 socketid, flag)) == NULL) {
333 printf("Create m2s ring %s failed\n", res->ring_name[0]);
337 snprintf(res->ring_name[1], MAX_NAME_LEN, "%s%u",
338 RING_SLAVE_NAME, slaveid);
339 if ((res->ring[1] = create_ring(res->ring_name[1], NB_CORE_MSGBUF,
340 socketid, flag)) == NULL) {
341 printf("Create s2m ring %s failed\n", res->ring_name[1]);
/* Send a command to the peer over the paired master/slave ring.
 * is_master selects which ring is written (fd = !is_master, per getcmd's
 * mirror logic — TODO confirm; the fd assignment line is missing here). */
350 sendcmd(unsigned slaveid, enum l2fwd_cmd cmd, int is_master)
352 struct lcore_resource_struct *res = &lcore_resource[slaveid];
/* Only check master side: a slave must already be enabled and running */
357 if (is_master && !res->enabled)
360 if (res->ring[fd] == NULL)
/* command payload travels in a small mempool buffer, not inline */
363 if (rte_mempool_get(message_pool, &msg) < 0) {
364 printf("Error to get message buffer\n");
368 *(enum l2fwd_cmd *)msg = cmd;
370 if (rte_ring_enqueue(res->ring[fd], msg) != 0) {
371 printf("Enqueue error\n");
/* return the buffer on failure so the pool does not leak */
372 rte_mempool_put(message_pool, msg);
/* Get a command from the peer in the paired master/slave ring.
 * Non-blocking: returns non-zero when the ring is empty (dequeue fails). */
381 getcmd(unsigned slaveid, enum l2fwd_cmd *cmd, int is_master)
383 struct lcore_resource_struct *res = &lcore_resource[slaveid];
/* master reads the slave->master ring (index 1), slave reads index 0 */
385 int fd = !!is_master;
/* Only check master, it must be enabled and running if it is slave */
388 if (is_master && (!res->enabled))
391 if (res->ring[fd] == NULL)
394 ret = rte_ring_dequeue(res->ring[fd], &msg);
397 *cmd = *(enum l2fwd_cmd *)msg;
/* buffer goes back to the pool once the command value is copied out */
398 rte_mempool_put(message_pool, msg);
/* Master sends a command to a slave and busy-waits until the matching ack
 * arrives or the slave is detected as not running (error path). */
405 master_sendcmd_with_ack(unsigned slaveid, enum l2fwd_cmd cmd)
407 enum l2fwd_cmd ack_cmd;
410 if (sendcmd(slaveid, cmd, 1) != 0)
411 rte_exit(EXIT_FAILURE, "Failed to send message\n");
/* poll for the ack; the slave echoes the same command value back */
415 ret = getcmd(slaveid, &ack_cmd, 1);
416 if (ret == 0 && cmd == ack_cmd)
/* If slave not running yet, return an error */
420 if (flib_query_slave_status(slaveid) != ST_RUN) {
/* Stop and restart every port assigned to the given slave lcore; used to
 * recover after a slave process died mid-forwarding. */
431 reset_slave_all_ports(unsigned slaveid)
433 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
/* stop/start port */
437 for (i = 0; i < slave->port_num; i++) {
438 char buf_name[RTE_MEMPOOL_NAMESIZE];
439 struct rte_mempool *pool;
440 printf("Stop port :%d\n", slave->port[i]);
441 rte_eth_dev_stop(slave->port[i]);
/* look up the port's mbuf pool to report how many mbufs survived */
442 snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, slave->port[i]);
443 pool = rte_mempool_lookup(buf_name);
445 printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
446 rte_mempool_count(pool), (unsigned)NB_MBUF);
448 printf("Can't find mempool %s\n", buf_name);
450 printf("Start port :%d\n", slave->port[i]);
451 ret = rte_eth_dev_start(slave->port[i]);
/* Re-initialize the resources shared between master and this slave.
 * Currently the ports are the only shared resource to reset. */
459 reset_shared_structures(unsigned slaveid)
/* Only port are shared resource here */
463 ret = reset_slave_all_ports(slaveid);
/*
 * Call this function to re-create the resources needed by a slave process
 * that exited in the last instance: its command rings, drained of any
 * stale messages.
 */
473 init_slave_res(unsigned slaveid)
475 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
/* a disabled slave should never be re-initialized — flag it loudly */
478 if (!slave->enabled) {
479 printf("Something wrong with lcore=%u enabled=%d\n",slaveid,
/* Initialize ring */
485 if (create_ms_ring(slaveid) != 0)
486 rte_exit(EXIT_FAILURE, "failed to create ring for slave %u\n",
/* drain un-read buffers left over from the previous instance, if any */
490 while (getcmd(slaveid, &cmd, 1) == 0);
491 while (getcmd(slaveid, &cmd, 0) == 0);
/* Re-initialize a stalled slave's resources, then re-launch its main loop
 * on the same lcore via the fork library. */
497 recreate_one_slave(unsigned slaveid)
/* Re-initialize resource for stalled slave */
501 if ((ret = init_slave_res(slaveid)) != 0) {
502 printf("Init slave=%u failed\n", slaveid);
506 if ((ret = flib_remote_launch(l2fwd_launch_one_lcore, NULL, slaveid))
508 printf("Launch slave %u failed\n", slaveid);
/*
 * Remap the resources belonging to slave_id onto the new lcore obtained
 * from flib_assign_lcore_id(); used only when the floating-process option
 * is applied.
 *
 * slaveid: original lcore_id whose state is copied
 * map_id:  newly assigned lcore_id that will run the slave
 */
521 remapping_slave_resource(unsigned slaveid, unsigned map_id)
/* remapping lcore_resource */
525 memcpy(&lcore_resource[map_id], &lcore_resource[slaveid],
526 sizeof(struct lcore_resource_struct));
/* remapping lcore_queue_conf */
529 memcpy(&lcore_queue_conf[map_id], &lcore_queue_conf[slaveid],
530 sizeof(struct lcore_queue_conf));
/* Recover a slave and its paired peer together: reset both sides' shared
 * port state, return any floating lcore ids to the pool, then re-create
 * and re-launch both slaves. Pairing exists because the two ports forward
 * into each other and must stop/start in lockstep. */
534 reset_pair(unsigned slaveid, unsigned pairid)
537 if ((ret = reset_shared_structures(slaveid)) != 0)
540 if((ret = reset_shared_structures(pairid)) != 0)
544 unsigned map_id = mapping_id[slaveid];
/* give the dead slave's floating lcore id back to the allocator */
546 if (map_id != INVALID_MAPPING_ID) {
547 printf("%u return mapping id %u\n", slaveid, map_id);
548 flib_free_lcore_id(map_id);
549 mapping_id[slaveid] = INVALID_MAPPING_ID;
552 map_id = mapping_id[pairid];
553 if (map_id != INVALID_MAPPING_ID) {
554 printf("%u return mapping id %u\n", pairid, map_id);
555 flib_free_lcore_id(map_id);
556 mapping_id[pairid] = INVALID_MAPPING_ID;
/* finally re-create and re-launch both halves of the pair */
560 if((ret = recreate_one_slave(slaveid)) != 0)
563 ret = recreate_one_slave(pairid);
/* Callback registered with the fork library: invoked in the master when a
 * slave process exits. Marks the slave for re-creation; the master's main
 * loop performs the actual recovery under res_lock. */
570 slave_exit_cb(unsigned slaveid, __attribute__((unused))int stat)
572 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
574 printf("Get slave %u leave info\n", slaveid);
575 if (!slave->enabled) {
576 printf("Lcore=%u not registered for it's exit\n", slaveid);
/* flags is read by the master loop; guard the update with the lock */
579 rte_spinlock_lock(&res_lock);
/* Change the state and wait master to start them */
582 slave->flags = SLAVE_RECREATE_FLAG;
584 rte_spinlock_unlock(&res_lock);
/* Flush up to n staged packets for 'port' out of qconf's TX table.
 * Packets the NIC did not accept are counted as dropped and freed
 * (the free loop's surrounding while/brace lines are missing here). */
589 l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
591 struct rte_mbuf **m_table;
595 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
597 ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
598 port_statistics[port].tx += ret;
/* anything past index ret was rejected by the driver */
599 if (unlikely(ret < n)) {
600 port_statistics[port].dropped += (n - ret);
602 rte_pktmbuf_free(m_table[ret]);
/* Stage one packet for transmission on 'port'; when the per-port buffer
 * reaches MAX_PKT_BURST packets, flush it with l2fwd_send_burst(). */
611 l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
613 unsigned lcore_id, len;
614 struct lcore_queue_conf *qconf;
616 lcore_id = rte_lcore_id();
618 qconf = &lcore_queue_conf[lcore_id];
619 len = qconf->tx_mbufs[port].len;
620 qconf->tx_mbufs[port].m_table[len] = m;
/* enough pkts to be sent */
624 if (unlikely(len == MAX_PKT_BURST)) {
625 l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
/* remember the new fill level (reset to 0 after a flush, presumably —
 * the len reset line is missing from this dump) */
629 qconf->tx_mbufs[port].len = len;
/* Forward one packet to the paired destination port, rewriting the
 * Ethernet header the same way as the classic l2fwd example. */
634 l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
636 struct ether_hdr *eth;
640 dst_port = l2fwd_dst_ports[portid];
641 eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
/* dst MAC becomes 02:00:00:00:00:xx, where xx is the dest port id */
644 tmp = &eth->d_addr.addr_bytes[0];
645 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
/* src MAC becomes the MAC address of the TX port */
648 ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
650 l2fwd_send_packet(m, (uint8_t) dst_port);
/* main processing loop (runs in each slave process):
 * 1. poll the master->slave ring for commands (CMD_STOP ends the loop),
 * 2. drain staged TX buffers every ~BURST_TX_DRAIN_US,
 * 3. receive bursts from each assigned RX port and forward them. */
655 l2fwd_main_loop(void)
657 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
660 uint64_t prev_tsc, diff_tsc, cur_tsc;
661 unsigned i, j, portid, nb_rx;
662 struct lcore_queue_conf *qconf;
/* TSC ticks corresponding to BURST_TX_DRAIN_US microseconds */
663 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
667 lcore_id = rte_lcore_id();
669 qconf = &lcore_queue_conf[lcore_id];
671 if (qconf->n_rx_port == 0) {
672 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
676 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
678 for (i = 0; i < qconf->n_rx_port; i++) {
679 portid = qconf->rx_port_list[i];
680 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
686 cur_tsc = rte_rdtsc();
/* a received command is echoed back as its own ack */
688 if (unlikely(getcmd(lcore_id, &cmd, 0) == 0)) {
689 sendcmd(lcore_id, cmd, 0);
/* If get stop command, stop forwarding and exit */
692 if (cmd == CMD_STOP) {
/*
 * TX burst queue drain
 */
700 diff_tsc = cur_tsc - prev_tsc;
701 if (unlikely(diff_tsc > drain_tsc)) {
703 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
704 if (qconf->tx_mbufs[portid].len == 0)
706 l2fwd_send_burst(&lcore_queue_conf[lcore_id],
707 qconf->tx_mbufs[portid].len,
709 qconf->tx_mbufs[portid].len = 0;
/*
 * Read packet from RX queues
 */
716 for (i = 0; i < qconf->n_rx_port; i++) {
718 portid = qconf->rx_port_list[i];
719 nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
720 pkts_burst, MAX_PKT_BURST);
722 port_statistics[portid].rx += nb_rx;
/* prefetch the packet data before touching headers */
724 for (j = 0; j < nb_rx; j++) {
726 rte_prefetch0(rte_pktmbuf_mtod(m, void *));
727 l2fwd_simple_forward(m, portid);
/* Slave entry point invoked by the fork library on every worker lcore.
 * In floating-process mode it first un-pins the process, takes a new
 * lcore id, remaps its resources onto it, then runs l2fwd_main_loop();
 * on exit it returns the borrowed lcore id. */
734 l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
736 unsigned lcore_id = rte_lcore_id();
/* Change it to floating process, also change it's lcore_id */
742 clear_cpu_affinity();
/* reset the per-thread lcore id before requesting a new one */
743 RTE_PER_LCORE(_lcore_id) = 0;
745 if (flib_assign_lcore_id() < 0 ) {
746 printf("flib_assign_lcore_id failed\n");
749 flcore_id = rte_lcore_id();
/* Set mapping id, so master can return it after slave exited */
751 mapping_id[lcore_id] = flcore_id;
752 printf("Org lcore_id = %u, cur lcore_id = %u\n",
753 lcore_id, flcore_id);
754 remapping_slave_resource(lcore_id, flcore_id);
/* return lcore_id before return */
761 flib_free_lcore_id(rte_lcore_id());
762 mapping_id[lcore_id] = INVALID_MAPPING_ID;
/* Print command-line usage for the application-specific options
 * (everything after the EAL '--' separator). */
769 l2fwd_usage(const char *prgname)
771 printf("%s [EAL options] -- -p PORTMASK -s COREMASK [-q NQ] -f\n"
772 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
773 " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
774 " -f use floating process which won't bind to any core to run\n"
775 " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
/*
 * Parse the hexadecimal port mask supplied with the -p option.
 *
 * @param portmask  NUL-terminated hex string (with or without "0x").
 * @return the mask (> 0) on success; 0 on any parse error, matching the
 *         caller's "l2fwd_enabled_port_mask == 0 -> invalid" check.
 *
 * Fixes vs. original: errno/ERANGE from strtoul is now checked, and a
 * value wider than int is rejected instead of being silently truncated
 * by the int return type.
 */
static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	errno = 0;
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') ||
	    (errno == ERANGE))
		return 0;

	/* reject masks the int return type cannot represent */
	if (pm > (unsigned long)INT_MAX)
		return 0;

	return (int)pm;
}
/* Guard in case this block is compiled before the file-level define. */
#ifndef MAX_RX_QUEUE_PER_LCORE
#define MAX_RX_QUEUE_PER_LCORE 16
#endif

/*
 * Parse the per-lcore queue count supplied with the -q option.
 *
 * @param q_arg  NUL-terminated decimal string.
 * @return the count in [1, MAX_RX_QUEUE_PER_LCORE - 1] on success, or 0
 *         on error (the caller treats 0 as "invalid queue number").
 *
 * Fixes vs. original: the comment wrongly said "hexadecimal" while base
 * 10 is used, and errno/ERANGE from strtoul was not checked.
 */
static unsigned int
l2fwd_parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	errno = 0;
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') ||
	    (errno == ERANGE))
		return 0;
	if (n == 0)
		return 0;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return 0;

	return (unsigned int)n;
}
/* Guard in case this block is compiled before the file-level define. */
#ifndef MAX_TIMER_PERIOD
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
#endif

/*
 * Parse the statistics refresh period (seconds) supplied with -T.
 *
 * @param q_arg  NUL-terminated decimal string.
 * @return the period in [0, MAX_TIMER_PERIOD - 1] seconds, or -1 on
 *         error (the caller multiplies the result and rejects < 0).
 *
 * Fixes vs. original: errno/ERANGE from strtol is now checked, and
 * negative inputs are rejected here explicitly instead of relying on the
 * caller's post-multiplication sign check.
 */
static int
l2fwd_parse_timer_period(const char *q_arg)
{
	char *end = NULL;
	long n;

	/* parse decimal number string */
	errno = 0;
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') ||
	    (errno == ERANGE))
		return -1;
	if (n < 0 || n >= MAX_TIMER_PERIOD)
		return -1;

	return (int)n;
}
/* Parse the arguments given in the command line of the application
 * (the part after the EAL '--'): -p portmask, -q queues/lcore,
 * -T stats period, -f floating-process mode. */
832 l2fwd_parse_args(int argc, char **argv)
837 char *prgname = argv[0];
838 static struct option lgopts[] = {
845 while ((opt = getopt_long(argc, argvopt, "p:q:T:f",
846 lgopts, &option_index)) != EOF) {
/* portmask: 0 means parse failure */
851 l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
852 if (l2fwd_enabled_port_mask == 0) {
853 printf("invalid portmask\n");
854 l2fwd_usage(prgname);
/* nqueue: 0 means parse failure */
862 l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
863 if (l2fwd_rx_queue_per_lcore == 0) {
864 printf("invalid queue number\n");
865 l2fwd_usage(prgname);
/* timer period: converted from seconds to TSC-scaled units */
872 timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
873 if (timer_period < 0) {
874 printf("invalid timer period\n");
875 l2fwd_usage(prgname);
/* use floating process */
887 l2fwd_usage(prgname);
891 l2fwd_usage(prgname);
/* make argv[optind-1] the new "program name" for further parsing */
897 argv[optind-1] = prgname;
900 l2fwd_usage(prgname);
904 optind = 0; /* reset getopt lib */
/* Check the link status of all ports in up to 9s, and print them finally.
 * Polls every CHECK_INTERVAL ms; prints per-port status once all links
 * are up or the timeout expires. */
910 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
912 #define CHECK_INTERVAL 100 /* 100ms */
913 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
914 uint8_t portid, count, all_ports_up, print_flag = 0;
915 struct rte_eth_link link;
917 printf("\nChecking link status");
919 for (count = 0; count <= MAX_CHECK_TIME; count++) {
921 for (portid = 0; portid < port_num; portid++) {
922 if ((port_mask & (1 << portid)) == 0)
924 memset(&link, 0, sizeof(link));
925 rte_eth_link_get_nowait(portid, &link);
/* print link status if flag set */
927 if (print_flag == 1) {
928 if (link.link_status)
929 printf("Port %d Link Up - speed %u "
930 "Mbps - %s\n", (uint8_t)portid,
931 (unsigned)link.link_speed,
932 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): the half-duplex literal carries a stray trailing \n,
 * producing a double newline — cosmetic bug, left untouched here */
933 ("full-duplex") : ("half-duplex\n"));
935 printf("Port %d Link Down\n",
/* clear all_ports_up flag if any link down */
940 if (link.link_status == 0) {
/* after finally printing all link status, get out */
949 if (all_ports_up == 0) {
952 rte_delay_ms(CHECK_INTERVAL);
/* set the print_flag if all ports up or timeout */
956 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/* Application entry point (master process).
 * Sequence: save/clear CPU affinity -> EAL init -> app args -> fork-lib
 * init -> shared allocations -> per-port mbuf pools -> port pairing ->
 * per-lcore queue/resource assignment -> port init/start -> pair
 * bookkeeping -> message pool + command rings -> launch slaves -> master
 * loop printing stats and recovering dead slave pairs. */
964 MAIN(int argc, char **argv)
966 struct lcore_queue_conf *qconf;
967 struct rte_eth_dev_info dev_info;
970 uint8_t nb_ports_available;
971 uint8_t portid, last_port;
972 unsigned rx_lcore_id;
973 unsigned nb_ports_in_mask = 0;
976 uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
/* Save cpu_affinity first, restore it in case it's floating process option */
979 if (get_cpu_affinity() != 0)
980 rte_exit(EXIT_FAILURE, "get_cpu_affinity error\n");
/* Also tries to set cpu affinity to detect whether it will fail in child process */
983 if(clear_cpu_affinity() != 0)
984 rte_exit(EXIT_FAILURE, "clear_cpu_affinity error\n");
/* init EAL; consumes the EAL portion of the command line */
987 ret = rte_eal_init(argc, argv);
989 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
/* parse application arguments (after the EAL ones) */
994 ret = l2fwd_parse_args(argc, argv);
996 rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
/* initialize the process-fork helper library */
999 if (flib_init() != 0)
1000 rte_exit(EXIT_FAILURE, "flib init error");
/*
 * Allocate structures that slave lcores would change. For those that slaves
 * only read, there is no need to use malloc to share them; globals or static
 * variables are fine since a slave inherits all the state the master initialized.
 */
1007 if (l2fwd_malloc_shared_struct() != 0)
1008 rte_exit(EXIT_FAILURE, "malloc mem failed\n");
/* Initialize lcore_resource structures */
1011 memset(lcore_resource, 0, sizeof(lcore_resource));
1012 for (i = 0; i < RTE_MAX_LCORE; i++)
1013 lcore_resource[i].lcore_id = i;
1015 nb_ports = rte_eth_dev_count();
1017 rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
/* clamp to what the static tables can hold */
1019 if (nb_ports > RTE_MAX_ETHPORTS)
1020 nb_ports = RTE_MAX_ETHPORTS;
/* create the mbuf pool */
1023 for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
1025 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1027 char buf_name[RTE_MEMPOOL_NAMESIZE];
/* single producer/consumer: each pool is used by exactly one slave */
1028 flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
1029 snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
1030 l2fwd_pktmbuf_pool[portid] =
1031 rte_mempool_create(buf_name, NB_MBUF,
1033 sizeof(struct rte_pktmbuf_pool_private),
1034 rte_pktmbuf_pool_init, NULL,
1035 rte_pktmbuf_init, NULL,
1036 rte_socket_id(), flags);
1037 if (l2fwd_pktmbuf_pool[portid] == NULL)
1038 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
1040 printf("Create mbuf %s\n", buf_name);
/* reset l2fwd_dst_ports */
1044 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
1045 l2fwd_dst_ports[portid] = 0;
/*
 * Pair enabled ports: each even/odd pair forwards into each other.
 * Each logical core is assigned a dedicated TX queue on each port.
 */
1051 for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
1053 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1056 if (nb_ports_in_mask % 2) {
1057 l2fwd_dst_ports[portid] = last_port;
1058 l2fwd_dst_ports[last_port] = portid;
1065 rte_eth_dev_info_get(portid, &dev_info);
/* an unpaired last port forwards to itself */
1067 if (nb_ports_in_mask % 2) {
1068 printf("Notice: odd number of ports in portmask.\n");
1069 l2fwd_dst_ports[last_port] = last_port;
/* Initialize the port/queue configuration of each logical core */
1076 for (portid = 0; portid < nb_ports; portid++) {
1077 struct lcore_resource_struct *res;
/* skip ports that are not enabled */
1079 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
/* get the lcore_id for this port */
/* skip master lcore */
1084 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
1085 rte_get_master_lcore() == rx_lcore_id ||
1086 lcore_queue_conf[rx_lcore_id].n_rx_port ==
1087 l2fwd_rx_queue_per_lcore) {
1090 if (rx_lcore_id >= RTE_MAX_LCORE)
1091 rte_exit(EXIT_FAILURE, "Not enough cores\n");
1094 if (qconf != &lcore_queue_conf[rx_lcore_id])
/* Assigned a new logical core in the loop above. */
1096 qconf = &lcore_queue_conf[rx_lcore_id];
1098 qconf->rx_port_list[qconf->n_rx_port] = portid;
/* Save the port resource info into lcore_resource structures */
1102 res = &lcore_resource[rx_lcore_id];
1104 res->port[res->port_num++] = portid;
1106 printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
1109 nb_ports_available = nb_ports;
/* Initialise each port */
1112 for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
1114 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
1115 printf("Skipping disabled port %u\n", (unsigned) portid);
1116 nb_ports_available--;
1120 printf("Initializing port %u... ", (unsigned) portid);
/* one RX queue and one TX queue per port */
1122 ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
1124 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
1125 ret, (unsigned) portid);
1127 rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
1131 ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
1132 rte_eth_dev_socket_id(portid),
1134 l2fwd_pktmbuf_pool[portid]);
1136 rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
1137 ret, (unsigned) portid);
/* init one TX queue on each port */
1141 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
1142 rte_eth_dev_socket_id(portid),
1145 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
1146 ret, (unsigned) portid);
/* Start device */
1149 ret = rte_eth_dev_start(portid);
1151 rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
1152 ret, (unsigned) portid);
1156 rte_eth_promiscuous_enable(portid);
1158 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
1160 l2fwd_ports_eth_addr[portid].addr_bytes[0],
1161 l2fwd_ports_eth_addr[portid].addr_bytes[1],
1162 l2fwd_ports_eth_addr[portid].addr_bytes[2],
1163 l2fwd_ports_eth_addr[portid].addr_bytes[3],
1164 l2fwd_ports_eth_addr[portid].addr_bytes[4],
1165 l2fwd_ports_eth_addr[portid].addr_bytes[5]);
/* initialize port stats */
//memset(&port_statistics, 0, sizeof(port_statistics));
1171 if (!nb_ports_available) {
1172 rte_exit(EXIT_FAILURE,
1173 "All available ports are disabled. Please set portmask.\n");
1176 check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
/* Record pair lcore */
/*
 * Since the l2fwd example creates pairs between neighbouring ports — i.e.
 * port 0 receives and forwards to port 1 and vice versa — those 2 ports have a
 * dependency. If one port stops working (killed, for example), the pair needs to
 * be stopped/started again. During that time, the other port must wait until the
 * stop/start procedure completes. So, record the pair relationship for the
 * lcores doing the forwarding.
 */
1187 for (portid = 0; portid < nb_ports; portid++) {
1189 unsigned lcore = 0, pair_lcore = 0;
1190 unsigned j, find_lcore, find_pair_lcore;
/* skip ports that are not enabled */
1192 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
/* Find pair ports' lcores */
1196 find_lcore = find_pair_lcore = 0;
1197 pair_port = l2fwd_dst_ports[portid];
1198 for (i = 0; i < RTE_MAX_LCORE; i++) {
1199 if (!rte_lcore_is_enabled(i))
1201 for (j = 0; j < lcore_queue_conf[i].n_rx_port;j++) {
1202 if (lcore_queue_conf[i].rx_port_list[j] == portid) {
1207 if (lcore_queue_conf[i].rx_port_list[j] == pair_port) {
1209 find_pair_lcore = 1;
1213 if (find_lcore && find_pair_lcore)
1216 if (!find_lcore || !find_pair_lcore)
1217 rte_exit(EXIT_FAILURE, "Not find port=%d pair\n", portid);
1219 printf("lcore %u and %u paired\n", lcore, pair_lcore);
1220 lcore_resource[lcore].pair_id = pair_lcore;
1221 lcore_resource[pair_lcore].pair_id = lcore;
/* Create message buffer pool for all masters and slaves */
1225 message_pool = rte_mempool_create("ms_msg_pool",
1226 NB_CORE_MSGBUF * RTE_MAX_LCORE,
1227 sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
1229 rte_pktmbuf_pool_init, NULL,
1230 rte_pktmbuf_init, NULL,
1231 rte_socket_id(), 0);
1233 if (message_pool == NULL)
1234 rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
/* Create ring for each master and slave pair, also register cb when slave leaves */
1237 for (i = 0; i < RTE_MAX_LCORE; i++) {
/*
 * Only create rings and register the slave_exit cb for cores involved in
 * packet forwarding.
 */
1242 if (lcore_resource[i].enabled) {
/* Create ring for master and slave communication */
1244 ret = create_ms_ring(i);
1246 rte_exit(EXIT_FAILURE, "Create ring for lcore=%u failed",
1249 if (flib_register_slave_exit_notify(i,
1250 slave_exit_cb) != 0)
1251 rte_exit(EXIT_FAILURE,
1252 "Register master_trace_slave_exit failed");
/* launch per-lcore init on every lcore except master */
1257 flib_mp_remote_launch(l2fwd_launch_one_lcore, NULL, SKIP_MASTER);
/* print statistics every 10 seconds (master loop) */
1260 prev_tsc = cur_tsc = rte_rdtsc();
1264 cur_tsc = rte_rdtsc();
1265 diff_tsc = cur_tsc - prev_tsc;
/* if timer is enabled */
1267 if (timer_period > 0) {
/* advance the timer */
1270 timer_tsc += diff_tsc;
/* if timer has reached its timeout */
1273 if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
/* reset the timer */
/* Check any slave need restart or recreate */
1284 rte_spinlock_lock(&res_lock);
1285 for (i = 0; i < RTE_MAX_LCORE; i++) {
1286 struct lcore_resource_struct *res = &lcore_resource[i];
1287 struct lcore_resource_struct *pair = &lcore_resource[res->pair_id];
/* If find slave exited, try to reset pair */
1290 if (res->enabled && res->flags && pair->enabled) {
/* lock dropped around the blocking ack wait to avoid deadlocking
 * against slave_exit_cb taking res_lock */
1292 master_sendcmd_with_ack(pair->lcore_id, CMD_STOP);
1293 rte_spinlock_unlock(&res_lock);
1295 rte_spinlock_lock(&res_lock);
1299 if (reset_pair(res->lcore_id, pair->lcore_id) != 0)
1300 rte_exit(EXIT_FAILURE, "failed to reset slave");
1305 rte_spinlock_unlock(&res_lock);