/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_vect.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>

#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024
/* Static global variables used within this file. */
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/**< Ports set in promiscuous mode off by default. */
static int promiscuous_on;

/* Select Longest-Prefix or Exact match. */
static int l3fwd_lpm_on;
static int l3fwd_em_on;
/* Global variables. */

static int numa_on = 1; /**< NUMA is enabled by default. */
static int parse_ptype; /**< Parse packet type using rx callback; disabled by default. */
static int per_port_pool; /**< Use separate buffer pools per port; disabled by default. */

volatile bool force_quit;
/* ethernet addresses of ports */
uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

xmm_t val_eth[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
uint32_t enabled_port_mask;
/* Used only in exact match mode. */
int ipv6; /**< ipv6 is false by default. */
uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
struct lcore_conf lcore_conf[RTE_MAX_LCORE];

struct lcore_params {
	uint16_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
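
/*
 * The default table above maps queues 0-2 of ports 0 and 1 and queue 0
 * of port 2 to lcore 2, and queues 0-1 of port 3 to lcore 3; a --config
 * argument replaces this mapping entirely.
 */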
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
static uint8_t lkp_per_socket[NB_SOCKETS];
struct l3fwd_lkp_mode {
	void (*setup)(int);
	int (*check_ptype)(int);
	rte_rx_callback_fn cb_parse_ptype;
	int (*main_loop)(void *);
	void* (*get_ipv4_lookup_struct)(int);
	void* (*get_ipv6_lookup_struct)(int);
};

static struct l3fwd_lkp_mode l3fwd_lkp;
static struct l3fwd_lkp_mode l3fwd_em_lkp = {
	.setup = setup_hash,
	.check_ptype = em_check_ptype,
	.cb_parse_ptype = em_cb_parse_ptype,
	.main_loop = em_main_loop,
	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
};
static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
	.setup = setup_lpm,
	.check_ptype = lpm_check_ptype,
	.cb_parse_ptype = lpm_cb_parse_ptype,
	.main_loop = lpm_main_loop,
	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
};
/*
 * Setup lookup methods for forwarding.
 * Currently exact-match and longest-prefix-match
 * are supported.
 */
static void
setup_l3fwd_lookup_tables(void)
{
	/* Setup HASH lookup functions. */
	if (l3fwd_em_on)
		l3fwd_lkp = l3fwd_em_lkp;
	/* Setup LPM lookup functions. */
	else
		l3fwd_lkp = l3fwd_lpm_lkp;
}
static int
check_lcore_params(void)
{
	uint8_t queue, lcore;
	int socketid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("invalid queue number: %hhu\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
			return -1;
		}
		/* note the inner parentheses: assign first, then compare */
		if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
		    (numa_on == 0)) {
			printf("warning: lcore %hhu is on socket %d with numa off\n",
				lcore, socketid);
		}
	}
	return 0;
}
static int
check_port_config(void)
{
	uint16_t portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint16_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port) {
			if (lcore_params[i].queue_id == queue+1)
				queue = lcore_params[i].queue_id;
			else
				rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
					" in sequence and must start with 0\n",
					lcore_params[i].port_id);
		}
	}
	return (uint8_t)(++queue);
}
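
/*
 * For example, with --config="(0,0,1),(0,1,2)" port 0 carries queue ids
 * 0 and 1, so get_port_n_rx_queues(0) returns 2; a gap such as
 * "(0,0,1),(0,2,2)" triggers the "must be in sequence" abort above.
 */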
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	fprintf(stderr, "%s [EAL options] --"
		" -p PORTMASK"
		" [-P]"
		" [-E]"
		" [-L]"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
		" [--enable-jumbo [--max-pkt-len PKTLEN]]"
		" [--no-numa]"
		" [--hash-entry-num]"
		" [--ipv6]"
		" [--parse-ptype]"
		" [--per-port-pool]"
		" [--mode]"
		" [--eventq-sched]\n\n"

		" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		" -P : Enable promiscuous mode\n"
		" -E : Enable exact match\n"
		" -L : Enable longest prefix match (default)\n"
		" --config (port,queue,lcore): Rx queue configuration\n"
		" --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
		" --enable-jumbo: Enable jumbo frames\n"
		" --max-pkt-len: maximum packet length in decimal (64-9600);\n"
		"                valid only with --enable-jumbo\n"
		" --no-numa: Disable numa awareness\n"
		" --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
		" --ipv6: Set if running ipv6 packets\n"
		" --parse-ptype: Set to use software to analyze packet type\n"
		" --per-port-pool: Use separate buffer pool per port\n"
		" --mode: Packet transfer mode for I/O, poll or eventdev\n"
		"         Default mode = poll\n"
		" --eventq-sched: Event queue synchronization method\n"
		"                 ordered, atomic or parallel.\n"
		"                 Default: atomic\n"
		"                 Valid only if --mode=eventdev\n"
		" --event-eth-rxqs: Number of ethernet RX queues per device.\n"
		"                   Default: 1\n"
		"                   Valid only if --mode=eventdev\n\n",
		prgname);
}
static int
parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
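
/* For example, -p 0x3 sets bits 0 and 1, enabling ports 0 and 1 only. */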
static int
parse_hash_entry_number(const char *hash_entry_num)
{
	char *end = NULL;
	unsigned long hash_en;

	/* parse hexadecimal string */
	hash_en = strtoul(hash_entry_num, &end, 16);
	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (hash_en == 0)
		return -1;

	return hash_en;
}
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		if ((p0 = strchr(p, ')')) == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
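
/*
 * For example, --config="(0,0,1),(0,1,2)" yields two lcore_params entries:
 * {port 0, queue 0, lcore 1} and {port 0, queue 1, lcore 2}.
 */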
static void
parse_eth_dest(const char *optarg)
{
	uint16_t portid;
	char *port_end;
	uint8_t c, *dest, peer_addr[6];

	errno = 0;
	portid = strtoul(optarg, &port_end, 10);
	if (errno != 0 || port_end == optarg || *port_end++ != ',')
		rte_exit(EXIT_FAILURE,
			"Invalid eth-dest: %s", optarg);
	if (portid >= RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE,
			"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
			portid, RTE_MAX_ETHPORTS);

	if (cmdline_parse_etheraddr(NULL, port_end,
			&peer_addr, sizeof(peer_addr)) < 0)
		rte_exit(EXIT_FAILURE,
			"Invalid ethernet address: %s\n",
			port_end);
	dest = (uint8_t *)&dest_eth_addr[portid];
	for (c = 0; c < 6; c++)
		dest[c] = peer_addr[c];
	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}
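
/*
 * For example, --eth-dest=0,11:22:33:44:55:66 makes every packet forwarded
 * out of port 0 carry that destination MAC; the low 6 bytes of
 * val_eth[portid] are kept in sync for the header-rewrite fast path.
 */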
static void
parse_mode(const char *optarg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	if (!strcmp(optarg, "poll"))
		evt_rsrc->enabled = false;
	else if (!strcmp(optarg, "eventdev"))
		evt_rsrc->enabled = true;
}
static void
parse_eventq_sched(const char *optarg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	if (!strcmp(optarg, "ordered"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
	else if (!strcmp(optarg, "atomic"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
	else if (!strcmp(optarg, "parallel"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
}
static void
parse_event_eth_rx_queues(const char *eth_rx_queues)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	char *end = NULL;
	uint8_t num_eth_rx_queues;

	/* parse decimal string */
	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
		return;

	if (num_eth_rx_queues == 0)
		return;

	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
}
#define MAX_JUMBO_PKT_LEN 9600

static const char short_options[] =
	"p:"  /* portmask */
	"P"   /* promiscuous */
	"L"   /* enable longest prefix match */
	"E"   /* enable exact match */
	;
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_OPT_MODE "mode"
#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
#define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
/* long options mapped to a short option */
enum {
	/* first long only option value must be >= 256, so that we won't
	 * conflict with short options
	 */
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_ETH_DEST_NUM,
	CMD_LINE_OPT_NO_NUMA_NUM,
	CMD_LINE_OPT_IPV6_NUM,
	CMD_LINE_OPT_ENABLE_JUMBO_NUM,
	CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
	CMD_LINE_OPT_PARSE_PTYPE_NUM,
	CMD_LINE_OPT_PARSE_PER_PORT_POOL,
	CMD_LINE_OPT_MODE_NUM,
	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
	CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
};
static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
	{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
	{CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
	 CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
	{NULL, 0, 0, 0} /* getopt_long() requires a NULL terminator */
};
/*
 * This expression is used to calculate the number of mbufs needed
 * depending on user input, taking into account memory for rx and
 * tx hardware rings, cache per lcore and mtable per port per lcore.
 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
 * value of 8192.
 */
#define NB_MBUF(nports) RTE_MAX(	\
	(nports*nb_rx_queue*nb_rxd +		\
	nports*nb_lcores*MAX_PKT_BURST +	\
	nports*n_tx_queue*nb_txd +		\
	nb_lcores*MEMPOOL_CACHE_SIZE),		\
	(unsigned)8192)
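
/*
 * Worked example, assuming MAX_PKT_BURST = 32 and MEMPOOL_CACHE_SIZE = 256
 * (both defined in l3fwd.h): 2 ports, 2 RX queues per port, 4 lcores, and
 * 1024 descriptors per RX/TX ring give
 *   2*2*1024 + 2*4*32 + 2*4*1024 + 4*256 = 13568 mbufs,
 * which is above the 8192 floor, so RTE_MAX keeps the computed value.
 */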
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	uint8_t lcore_params = 0;
	uint8_t eventq_sched = 0;
	uint8_t eth_rx_q = 0;
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	argvopt = argv;

	/* Error or normal output strings. */
	while ((opt = getopt_long(argc, argvopt, short_options,
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				fprintf(stderr, "Invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous_on = 1;
			break;

		case 'E':
			l3fwd_em_on = 1;
			break;

		case 'L':
			l3fwd_lpm_on = 1;
			break;

		/* long options */
		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
			if (ret) {
				fprintf(stderr, "Invalid config\n");
				print_usage(prgname);
				return -1;
			}
			lcore_params = 1;
			break;
		case CMD_LINE_OPT_ETH_DEST_NUM:
			parse_eth_dest(optarg);
			break;

		case CMD_LINE_OPT_NO_NUMA_NUM:
			numa_on = 0;
			break;

		case CMD_LINE_OPT_IPV6_NUM:
			ipv6 = 1;
			break;
		case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
			const struct option lenopts = {
				"max-pkt-len", required_argument, 0, 0
			};

			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;

			/*
			 * if no max-pkt-len set, use the default
			 * value RTE_ETHER_MAX_LEN.
			 */
			if (getopt_long(argc, argvopt, "",
					&lenopts, &option_index) == 0) {
				ret = parse_max_pkt_len(optarg);
				if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
					fprintf(stderr,
						"invalid maximum packet length\n");
					print_usage(prgname);
					return -1;
				}
				port_conf.rxmode.max_rx_pkt_len = ret;
			}
			break;
		}
		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
			ret = parse_hash_entry_number(optarg);
			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
				hash_entry_number = ret;
			} else {
				fprintf(stderr, "invalid hash entry number\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
			printf("soft parse-ptype is enabled\n");
			parse_ptype = 1;
			break;

		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
			printf("per port buffer pool is enabled\n");
			per_port_pool = 1;
			break;
		case CMD_LINE_OPT_MODE_NUM:
			parse_mode(optarg);
			break;

		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
			parse_eventq_sched(optarg);
			eventq_sched = 1;
			break;

		case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
			parse_event_eth_rx_queues(optarg);
			eth_rx_q = 1;
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}
	/* If both LPM and EM are selected, return error. */
	if (l3fwd_lpm_on && l3fwd_em_on) {
		fprintf(stderr, "LPM and EM are mutually exclusive, select only one\n");
		return -1;
	}

	if (evt_rsrc->enabled && lcore_params) {
		fprintf(stderr, "lcore config is not valid when event mode is selected\n");
		return -1;
	}

	if (!evt_rsrc->enabled && eth_rx_q) {
		fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
		return -1;
	}

	if (!evt_rsrc->enabled && eventq_sched) {
		fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
		return -1;
	}
	/*
	 * Nothing is selected, pick longest-prefix match
	 * as default match.
	 */
	if (!l3fwd_lpm_on && !l3fwd_em_on) {
		fprintf(stderr, "Neither LPM nor EM selected, defaulting to LPM\n");
		l3fwd_lpm_on = 1;
	}

	/*
	 * ipv6 and hash flags are valid only for
	 * exact match, reset them to default for
	 * longest-prefix match.
	 */
	if (l3fwd_lpm_on) {
		ipv6 = 0;
		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
static int
init_mem(uint16_t portid, unsigned int nb_mbuf)
{
	struct lcore_conf *qconf;
	int socketid;
	unsigned lcore_id;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE,
				"Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		}

		if (pktmbuf_pool[portid][socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
				portid, socketid);
			pktmbuf_pool[portid][socketid] =
				rte_pktmbuf_pool_create(s, nb_mbuf,
					MEMPOOL_CACHE_SIZE, 0,
					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
			if (pktmbuf_pool[portid][socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n",
					socketid);
			else
				printf("Allocated mbuf pool on socket %d\n",
					socketid);

			/* Setup either LPM or EM (i.e. hash), but only once
			 * per available socket.
			 */
			if (!lkp_per_socket[socketid]) {
				l3fwd_lkp.setup(socketid);
				lkp_per_socket[socketid] = 1;
			}
		}
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct =
			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
		qconf->ipv6_lookup_struct =
			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
	}
	return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		if (force_quit)
			return;
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if (force_quit)
				return;
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. Speed %u Mbps -%s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
			signum);
		force_quit = true;
	}
}
static int
prepare_ptype_parser(uint16_t portid, uint16_t queueid)
{
	if (parse_ptype) {
		printf("Port %d: softly parse packet type info\n", portid);
		if (rte_eth_add_rx_callback(portid, queueid,
					    l3fwd_lkp.cb_parse_ptype,
					    NULL))
			return 1;

		printf("Failed to add rx callback: port=%d\n", portid);
		return 0;
	}

	if (l3fwd_lkp.check_ptype(portid))
		return 1;

	printf("port %d cannot parse packet type, please add --%s\n",
		portid, CMD_LINE_OPT_PARSE_PTYPE);
	return 0;
}
static void
l3fwd_poll_resource_setup(void)
{
	uint8_t nb_rx_queue, queue, socketid;
	struct rte_eth_dev_info dev_info;
	uint32_t n_tx_queue, nb_lcores;
	struct rte_eth_txconf *txconf;
	struct lcore_conf *qconf;
	uint16_t queueid, portid;
	unsigned int nb_ports;
	unsigned int lcore_id;
	int ret;

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_ports = rte_eth_dev_count_avail();

	if (check_port_config() < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue);
		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				portid,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, "
				"port=%d\n", ret, portid);

		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot get MAC address: err=%d, port=%d\n",
				ret, portid);

		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");
		print_ethaddr("Destination:",
			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
		printf(", ");
		/*
		 * prepare src MACs for each port.
		 */
		rte_ether_addr_copy(&ports_eth_addr[portid],
			(struct rte_ether_addr *)(val_eth + portid) + 1);
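
		/*
		 * val_eth[portid] now holds the first 12 bytes of the
		 * Ethernet header the fast path writes into forwarded
		 * packets: the destination MAC (bytes 0-5, from --eth-dest
		 * or the 02:00:00:00:00:xx default) followed by this port's
		 * own MAC as source (bytes 6-11).
		 */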
		/* init memory */
		if (!per_port_pool) {
			/* portid = 0; this is *not* signifying the first port,
			 * rather, it signifies that portid is ignored.
			 */
			ret = init_mem(0, NB_MBUF(nb_ports));
		} else {
			ret = init_mem(portid, NB_MBUF(1));
		}
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");
		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (numa_on)
				socketid =
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socketid, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;

			qconf->tx_port_id[qconf->n_tx_port] = portid;
			qconf->n_tx_port++;
		}
		printf("\n");
	}
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);

		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			struct rte_eth_rxconf rxq_conf;

			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid =
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_dev_info_get(portid, &dev_info);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"Error during getting device (port %u) info: %s\n",
					portid, strerror(-ret));

			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = port_conf.rxmode.offloads;
			if (!per_port_pool)
				ret = rte_eth_rx_queue_setup(portid, queueid,
						nb_rxd, socketid, &rxq_conf,
						pktmbuf_pool[0][socketid]);
			else
				ret = rte_eth_rx_queue_setup(portid, queueid,
						nb_rxd, socketid, &rxq_conf,
						pktmbuf_pool[portid][socketid]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, port=%d\n",
					ret, portid);
		}
	}
}
static void
l3fwd_service_enable(uint32_t service_id)
{
	uint8_t min_service_count = UINT8_MAX;
	uint32_t slcore_array[RTE_MAX_LCORE];
	unsigned int slcore = 0;
	uint8_t service_count;
	int32_t slcore_count;

	if (!rte_service_lcore_count())
		return;

	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
	if (slcore_count < 0)
		return;

	/* Get the core which has least number of services running. */
	while (slcore_count--) {
		/* Reset default mapping */
		rte_service_map_lcore_set(service_id,
				slcore_array[slcore_count], 0);
		service_count = rte_service_lcore_count_services(
				slcore_array[slcore_count]);
		if (service_count < min_service_count) {
			slcore = slcore_array[slcore_count];
			min_service_count = service_count;
		}
	}
	if (rte_service_map_lcore_set(service_id, slcore, 1))
		return;
	rte_service_lcore_start(slcore);
}
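
/*
 * Note: this mapping only happens when service lcores were reserved at EAL
 * init (for instance via the EAL -s <service coremask> option); with no
 * service cores available the function returns without starting anything.
 */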
static void
l3fwd_event_service_setup(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_info evdev_info;
	uint32_t service_id, caps;
	int ret, i;

	rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
	if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error in starting eventdev service\n");
		l3fwd_service_enable(service_id);
	}
	for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
		ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->rx_adptr.rx_adptr[i], &caps);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Failed to get Rx adapter[%d] caps\n",
				evt_rsrc->rx_adptr.rx_adptr[i]);
		ret = rte_event_eth_rx_adapter_service_id_get(
				evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error in starting Rx adapter[%d] service\n",
				evt_rsrc->rx_adptr.rx_adptr[i]);
		l3fwd_service_enable(service_id);
	}
	for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
		ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
				evt_rsrc->tx_adptr.tx_adptr[i], &caps);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter[%d] caps\n",
				evt_rsrc->tx_adptr.tx_adptr[i]);
		ret = rte_event_eth_tx_adapter_service_id_get(
				evt_rsrc->event_d_id,
				&service_id);
		if (ret != -ESRCH && ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error in starting Tx adapter[%d] service\n",
				evt_rsrc->tx_adptr.tx_adptr[i]);
		l3fwd_service_enable(service_id);
	}
}
int
main(int argc, char **argv)
{
	struct l3fwd_event_resources *evt_rsrc;
	struct lcore_conf *qconf;
	uint16_t queueid, portid;
	unsigned int lcore_id;
	uint8_t queue;
	int i, ret;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		dest_eth_addr[portid] =
			RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
	}
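
	/*
	 * For example, port 3 defaults to destination MAC 02:00:00:00:00:03;
	 * starting from the locally administered base address keeps these
	 * synthetic MACs out of the vendor-assigned range.
	 */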
	evt_rsrc = l3fwd_get_eventdev_rsrc();
	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

	/* Setup function pointers for lookup method. */
	setup_l3fwd_lookup_tables();

	evt_rsrc->per_port_pool = per_port_pool;
	evt_rsrc->pkt_pool = pktmbuf_pool;
	evt_rsrc->port_mask = enabled_port_mask;
	/* Configure eventdev parameters if user has requested */
	if (evt_rsrc->enabled) {
		l3fwd_event_resource_setup(&port_conf);
		if (l3fwd_em_on)
			l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
		else
			l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
		l3fwd_event_service_setup();
	} else
		l3fwd_poll_resource_setup();
	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on) {
			ret = rte_eth_promiscuous_enable(portid);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_promiscuous_enable: err=%s, port=%u\n",
					rte_strerror(-ret), portid);
		}
	}

	printf("\n");
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			if (prepare_ptype_parser(portid, queueid) == 0)
				rte_exit(EXIT_FAILURE, "ptype check fails\n");
		}
	}
	check_all_ports_link_status(enabled_port_mask);

	ret = 0;
	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER);
	if (evt_rsrc->enabled) {
		for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
			rte_event_eth_rx_adapter_stop(
					evt_rsrc->rx_adptr.rx_adptr[i]);
		for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
			rte_event_eth_tx_adapter_stop(
					evt_rsrc->tx_adptr.tx_adptr[i]);

		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			rte_eth_dev_stop(portid);
		}

		rte_eal_mp_wait_lcore();
		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			rte_eth_dev_close(portid);
		}

		rte_event_dev_stop(evt_rsrc->event_d_id);
		rte_event_dev_close(evt_rsrc->event_d_id);
	} else {
		rte_eal_mp_wait_lcore();

		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			printf("Closing port %d...", portid);
			rte_eth_dev_stop(portid);
			rte_eth_dev_close(portid);
			printf(" Done\n");
		}
	}
	printf("Bye...\n");

	return ret;
}