1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
11 #include <sys/queue.h>
18 #include <rte_common.h>
20 #include <rte_byteorder.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
25 #include <rte_launch.h>
26 #include <rte_atomic.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
42 #include <rte_string_fns.h>
43 #include <rte_cpuflags.h>
45 #include <cmdline_parse.h>
46 #include <cmdline_parse_etheraddr.h>
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

/* Hard upper bounds on per-port queue counts. */
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

/* Maximum number of (port,queue,lcore) tuples accepted via --config. */
#define MAX_LCORE_PARAMS 1024

/* Static global variables used within this file. */
/* Ring sizes actually used; may be adjusted per-device in main(). */
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/**< Ports set in promiscuous mode off by default. */
static int promiscuous_on;

/* Select Longest-Prefix or Exact match. */
static int l3fwd_lpm_on;
static int l3fwd_em_on;

static int numa_on = 1; /**< NUMA is enabled by default. */
static int parse_ptype; /**< Parse packet type using rx callback, and */
			/**< disabled by default */

/* Global variables. */

/* Set by signal_handler(); polled by the per-lcore main loops to exit. */
volatile bool force_quit;

/* ethernet addresses of ports */
uint64_t dest_eth_addr[RTE_MAX_ETHPORTS]; /* per-port destination MAC, packed into 64 bits */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; /* per-port source MAC read from HW */

/* Packed dst+src MACs per port, used for fast L2 header rewrite. */
xmm_t val_eth[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
uint32_t enabled_port_mask;

/* Used only in exact match mode. */
int ipv6; /**< ipv6 is false by default. */
uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;

/* Per-lcore forwarding state (RX queue list, TX queue ids, lookup structs). */
struct lcore_conf lcore_conf[RTE_MAX_LCORE];
99 } __rte_cache_aligned;
/* Storage filled by parse_config() when --config is given. */
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
/* Built-in default (port,queue,lcore) mapping used without --config. */
static struct lcore_params lcore_params_array_default[] = {
/* Active mapping: points at the defaults until --config overrides it. */
static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
/* Default device configuration: RSS-distributed RX with checksum offload,
 * plain (non-multiqueue) TX. Copied and specialised per port in main(). */
static struct rte_eth_conf port_conf = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
			.rss_hf = ETH_RSS_IP,
		.mq_mode = ETH_MQ_TX_NONE,

/* One mbuf pool per NUMA socket, shared by all ports/queues on that socket. */
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
/* Dispatch table abstracting one lookup method (exact-match or LPM). */
struct l3fwd_lkp_mode {
	int (*check_ptype)(int);              /* verify HW supplies needed ptypes */
	rte_rx_callback_fn cb_parse_ptype;    /* SW ptype-parsing RX callback */
	int (*main_loop)(void *);             /* per-lcore forwarding loop */
	void* (*get_ipv4_lookup_struct)(int); /* per-socket IPv4 lookup table */
	void* (*get_ipv6_lookup_struct)(int); /* per-socket IPv6 lookup table */

/* The selected method; filled in by setup_l3fwd_lookup_tables(). */
static struct l3fwd_lkp_mode l3fwd_lkp;
/* Exact-match (hash) implementation hooks (defined in l3fwd_em.c). */
static struct l3fwd_lkp_mode l3fwd_em_lkp = {
	.check_ptype = em_check_ptype,
	.cb_parse_ptype = em_cb_parse_ptype,
	.main_loop = em_main_loop,
	.get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,

/* Longest-prefix-match implementation hooks (defined in l3fwd_lpm.c). */
static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
	.check_ptype = lpm_check_ptype,
	.cb_parse_ptype = lpm_cb_parse_ptype,
	.main_loop = lpm_main_loop,
	.get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
	.get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
168 * Setup lookup methods for forwarding.
169 * Currently exact-match and longest-prefix-match
170 * are supported ones.
173 setup_l3fwd_lookup_tables(void)
175 /* Setup HASH lookup functions. */
177 l3fwd_lkp = l3fwd_em_lkp;
178 /* Setup LPM lookup functions. */
180 l3fwd_lkp = l3fwd_lpm_lkp;
184 check_lcore_params(void)
186 uint8_t queue, lcore;
190 for (i = 0; i < nb_lcore_params; ++i) {
191 queue = lcore_params[i].queue_id;
192 if (queue >= MAX_RX_QUEUE_PER_PORT) {
193 printf("invalid queue number: %hhu\n", queue);
196 lcore = lcore_params[i].lcore_id;
197 if (!rte_lcore_is_enabled(lcore)) {
198 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
201 if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
203 printf("warning: lcore %hhu is on socket %d with numa off \n",
211 check_port_config(void)
216 for (i = 0; i < nb_lcore_params; ++i) {
217 portid = lcore_params[i].port_id;
218 if ((enabled_port_mask & (1 << portid)) == 0) {
219 printf("port %u is not enabled in port mask\n", portid);
222 if (!rte_eth_dev_is_valid_port(portid)) {
223 printf("port %u is not present on the board\n", portid);
231 get_port_n_rx_queues(const uint16_t port)
236 for (i = 0; i < nb_lcore_params; ++i) {
237 if (lcore_params[i].port_id == port) {
238 if (lcore_params[i].queue_id == queue+1)
239 queue = lcore_params[i].queue_id;
241 rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
242 " in sequence and must start with 0\n",
243 lcore_params[i].port_id);
246 return (uint8_t)(++queue);
250 init_lcore_rx_queues(void)
252 uint16_t i, nb_rx_queue;
255 for (i = 0; i < nb_lcore_params; ++i) {
256 lcore = lcore_params[i].lcore_id;
257 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
258 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
259 printf("error: too many queues (%u) for lcore: %u\n",
260 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
263 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
264 lcore_params[i].port_id;
265 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
266 lcore_params[i].queue_id;
267 lcore_conf[lcore].n_rx_queue++;
/* Print command-line usage help for the application to stderr. */
print_usage(const char *prgname)
	fprintf(stderr, "%s [EAL options] --"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
		" [--enable-jumbo [--max-pkt-len PKTLEN]]"
		" [--hash-entry-num]"
		" [--parse-ptype]\n\n"

		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		"  -P : Enable promiscuous mode\n"
		"  -E : Enable exact match\n"
		"  -L : Enable longest prefix match (default)\n"
		"  --config (port,queue,lcore): Rx queue configuration\n"
		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
		"  --enable-jumbo: Enable jumbo frames\n"
		"  --max-pkt-len: Under the premise of enabling jumbo,\n"
		"                 maximum packet length in decimal (64-9600)\n"
		"  --no-numa: Disable numa awareness\n"
		"  --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
		"  --ipv6: Set if running ipv6 packets\n"
		"  --parse-ptype: Set to use software to analyze packet type\n\n",
/*
 * Parse the --max-pkt-len decimal argument.
 * Returns the length on success, -1 on empty/garbage/zero input
 * (the caller additionally range-checks against 64..MAX_JUMBO_PKT_LEN).
 */
static int
parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}
/*
 * Parse the -p hexadecimal port mask.
 * Returns the mask on success and 0 on invalid input: the caller tests
 * `enabled_port_mask == 0` to detect errors, so returning -1 here (as a
 * 32-bit unsigned mask) would silently enable every port on bad input.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
/*
 * Parse the --hash-entry-num hexadecimal argument (exact-match mode only).
 * Returns the entry count on success, -1 on empty/garbage/zero input;
 * the caller additionally bounds it by L3FWD_HASH_ENTRIES.
 */
static int
parse_hash_entry_number(const char *hash_entry_num)
{
	char *end = NULL;
	unsigned long hash_en;

	/* parse hexadecimal string */
	hash_en = strtoul(hash_entry_num, &end, 16);
	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (hash_en == 0)
		return -1;

	return hash_en;
}
/* Parse the --config "(port,queue,lcore)[,(port,queue,lcore)...]" option
 * into lcore_params_array and point lcore_params at it.
 * Returns 0 on success, -1 on malformed input. */
parse_config(const char *q_arg)
	const char *p, *p0 = q_arg;
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];

	/* walk each parenthesised tuple in the argument string */
	while ((p = strchr(p0,'(')) != NULL) {
		if((p0 = strchr(p,')')) == NULL)
		if(size >= sizeof(s))
		/* copy the tuple body into a scratch buffer and split on ',' */
		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
		for (i = 0; i < _NUM_FLD; i++){
			/* each field must be a valid integer fitting in a byte */
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
		/* record the validated tuple */
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
	/* switch from the built-in defaults to the user-supplied mapping */
	lcore_params = lcore_params_array;
/* Parse "--eth-dest=PORT,MM:MM:MM:MM:MM:MM": store the destination MAC for
 * PORT into dest_eth_addr[] and refresh the packed val_eth[] entry.
 * Exits the application on any parse error. */
parse_eth_dest(const char *optarg)
	uint8_t c, *dest, peer_addr[6];

	/* leading decimal port id, terminated by ',' */
	portid = strtoul(optarg, &port_end, 10);
	if (errno != 0 || port_end == optarg || *port_end++ != ',')
		rte_exit(EXIT_FAILURE,
			"Invalid eth-dest: %s", optarg);
	if (portid >= RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE,
			"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
			portid, RTE_MAX_ETHPORTS);

	/* remainder of the argument is the MAC address */
	if (cmdline_parse_etheraddr(NULL, port_end,
			&peer_addr, sizeof(peer_addr)) < 0)
		rte_exit(EXIT_FAILURE,
			"Invalid ethernet address: %s\n",
	/* copy the six MAC bytes into this port's 64-bit slot */
	dest = (uint8_t *)&dest_eth_addr[portid];
	for (c = 0; c < 6; c++)
		dest[c] = peer_addr[c];
	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
/* Upper bound accepted by --max-pkt-len. */
#define MAX_JUMBO_PKT_LEN 9600
/* Per-lcore mempool cache size passed to rte_pktmbuf_pool_create(). */
#define MEMPOOL_CACHE_SIZE 256

/* Short (single-letter) getopt options. */
static const char short_options[] =
	"P"   /* promiscuous */
	"L"   /* enable long prefix match */
	"E"   /* enable exact match */

/* Long option name strings (shared with usage/err messages). */
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
/* long options mapped to a short option */

/* first long only option value must be >= 256, so that we won't
 * conflict with short options */
CMD_LINE_OPT_MIN_NUM = 256,
CMD_LINE_OPT_CONFIG_NUM,
CMD_LINE_OPT_ETH_DEST_NUM,
CMD_LINE_OPT_NO_NUMA_NUM,
CMD_LINE_OPT_IPV6_NUM,
CMD_LINE_OPT_ENABLE_JUMBO_NUM,
CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
CMD_LINE_OPT_PARSE_PTYPE_NUM,
/* getopt_long() option table; values are the enum constants above. */
static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
	{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
	{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
	{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
	{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
	{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
/*
 * This expression is used to calculate the number of mbufs needed
 * depending on user input, taking into account memory for rx and
 * tx hardware rings, cache per lcore and mtable per port per lcore.
 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
 * (evaluated in main(), where nb_ports/nb_rx_queue/nb_lcores etc. are
 * in scope — this macro references those locals by name).
 */
#define NB_MBUF RTE_MAX(	\
	(nb_ports*nb_rx_queue*nb_rxd +		\
	nb_ports*nb_lcores*MAX_PKT_BURST +	\
	nb_ports*n_tx_queue*nb_txd +		\
	nb_lcores*MEMPOOL_CACHE_SIZE),		\
/* Parse the argument given in the command line of the application */
/* Returns 0 on success; prints usage and returns nonzero on error.
 * Also enforces that -E and -L are mutually exclusive and defaults to
 * LPM when neither is given. */
parse_args(int argc, char **argv)
	char *prgname = argv[0];

	/* Error or normal output strings. */
	while ((opt = getopt_long(argc, argvopt, short_options,
				lgopts, &option_index)) != EOF) {

			/* -p: hexadecimal bitmask of ports to use */
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				fprintf(stderr, "Invalid portmask\n");
				print_usage(prgname);

		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
				fprintf(stderr, "Invalid config\n");
				print_usage(prgname);

		case CMD_LINE_OPT_ETH_DEST_NUM:
			parse_eth_dest(optarg);

		case CMD_LINE_OPT_NO_NUMA_NUM:

		case CMD_LINE_OPT_IPV6_NUM:

		case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
			/* local option table for the optional --max-pkt-len */
			const struct option lenopts = {
				"max-pkt-len", required_argument, 0, 0

			port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
			port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;

			/*
			 * if no max-pkt-len set, use the default
			 * value ETHER_MAX_LEN.
			 */
			if (getopt_long(argc, argvopt, "",
					&lenopts, &option_index) == 0) {
				ret = parse_max_pkt_len(optarg);
				if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
						"invalid maximum packet length\n");
					print_usage(prgname);
				port_conf.rxmode.max_rx_pkt_len = ret;

		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
			ret = parse_hash_entry_number(optarg);
			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
				hash_entry_number = ret;
				fprintf(stderr, "invalid hash entry number\n");
				print_usage(prgname);

		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
			printf("soft parse-ptype is enabled\n");

			print_usage(prgname);

	/* If both LPM and EM are selected, return error. */
	if (l3fwd_lpm_on && l3fwd_em_on) {
		fprintf(stderr, "LPM and EM are mutually exclusive, select only one\n");

	/*
	 * Nothing is selected, pick longest-prefix match
	 */
	if (!l3fwd_lpm_on && !l3fwd_em_on) {
		fprintf(stderr, "LPM or EM none selected, default LPM on\n");

	/*
	 * ipv6 and hash flags are valid only for
	 * exact macth, reset them to default for
	 * longest-prefix match.
	 */
		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;

	argv[optind-1] = prgname;

	optind = 1; /* reset getopt lib */
637 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
639 char buf[ETHER_ADDR_FMT_SIZE];
640 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
641 printf("%s%s", name, buf);
/* Allocate one mbuf pool per NUMA socket in use, build the per-socket
 * lookup tables via l3fwd_lkp.setup(), and cache the lookup-struct
 * pointers in each enabled lcore's lcore_conf. Exits on failure. */
init_mem(unsigned nb_mbuf)
	struct lcore_conf *qconf;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)

		/* NOTE(review): presumably used only when numa_on — confirm
		 * against the elided branch. */
		socketid = rte_lcore_to_socket_id(lcore_id);

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE,
				"Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);

		/* create the pool only once per socket */
		if (pktmbuf_pool[socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_pktmbuf_pool_create(s, nb_mbuf,
					MEMPOOL_CACHE_SIZE, 0,
					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
			if (pktmbuf_pool[socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n",
			printf("Allocated mbuf pool on socket %d\n",

			/* Setup either LPM or EM(f.e Hash). */
			l3fwd_lkp.setup(socketid);

		/* every lcore caches its socket's lookup structs */
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct =
			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
		qconf->ipv6_lookup_struct =
			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
/* Check the link status of all ports in up to 9s, and print them finally */
/* Polls every masked port with rte_eth_link_get_nowait(); prints one
 * status line per port once all are up or the 9s budget expires. */
check_all_ports_link_status(uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		RTE_ETH_FOREACH_DEV(portid) {
			/* skip ports outside the enabled mask */
			if ((port_mask & (1 << portid)) == 0)
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					/* NOTE(review): the "half-duplex\n" literal adds a
					 * stray newline on top of the format's own "\n" —
					 * cosmetic bug, left as-is here. */
					"Port%d Link Up. Speed %u Mbps -%s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex\n"));
					printf("Port %d Link Down\n", portid);
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
		/* after finally printing all link status, get out */

		if (all_ports_up == 0) {
			rte_delay_ms(CHECK_INTERVAL);

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
753 signal_handler(int signum)
755 if (signum == SIGINT || signum == SIGTERM) {
756 printf("\n\nSignal %d received, preparing to exit...\n",
/* Ensure packet-type info is available for (portid, queueid): either
 * install the software parse callback (--parse-ptype) or verify the
 * hardware can classify the needed types. Returns nonzero on success. */
prepare_ptype_parser(uint16_t portid, uint16_t queueid)
		printf("Port %d: softly parse packet type info\n", portid);
		/* register the lookup-mode-specific SW ptype parser as RX callback */
		if (rte_eth_add_rx_callback(portid, queueid,
					    l3fwd_lkp.cb_parse_ptype,

		printf("Failed to add rx callback: port=%d\n", portid);

	/* otherwise rely on the NIC's own packet-type reporting */
	if (l3fwd_lkp.check_ptype(portid))

	printf("port %d cannot parse packet type, please add --%s\n",
	       portid, CMD_LINE_OPT_PARSE_PTYPE);
/* Application entry point: initialise the EAL, parse l3fwd arguments,
 * configure every enabled port (RX/TX queues, mempools, MACs), start the
 * ports, then launch the lookup-mode main loop on every lcore. */
main(int argc, char **argv)
	struct lcore_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t queueid, portid;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t nb_rx_queue, queue, socketid;

	/* init EAL first; it consumes its own argv prefix */
	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");

	/* install handlers so the forwarding loops can exit via force_quit */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		dest_eth_addr[portid] =
			ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
		*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_ports = rte_eth_dev_count_avail();

	if (check_port_config() < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* Setup function pointers for lookup method. */
	setup_l3fwd_lookup_tables();

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);

		printf("Initializing port %d ... ", portid );

		/* one RX queue per --config tuple, one TX queue per lcore */
		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue );

		rte_eth_dev_info_get(portid, &dev_info);
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		/* keep only the RSS hash functions this device supports */
		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);

		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &local_port_conf);
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%d\n",

		/* let the PMD round ring sizes to what it supports */
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, "
				"port=%d\n", ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		print_ethaddr("Destination:",
			(const struct ether_addr *)&dest_eth_addr[portid]);

		/*
		 * prepare src MACs for each port.
		 */
		ether_addr_copy(&ports_eth_addr[portid],
			(struct ether_addr *)(val_eth + portid) + 1);

	/* per-socket mempools and lookup tables */
	ret = init_mem(NB_MBUF);
		rte_exit(EXIT_FAILURE, "init_mem failed\n");

	/* init one TX queue per couple (lcore,port) */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			(uint8_t)rte_lcore_to_socket_id(lcore_id);
		printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);

		txconf = &dev_info.default_txconf;
		txconf->offloads = local_port_conf.txmode.offloads;
		ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup: err=%d, "
				"port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = queueid;
		qconf->tx_port_id[qconf->n_tx_port] = portid;

	/* init RX queues assigned to each enabled lcore */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
			struct rte_eth_dev *dev;
			struct rte_eth_conf *conf;
			struct rte_eth_rxconf rxq_conf;

			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			dev = &rte_eth_devices[portid];
			conf = &dev->data->dev_conf;
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			printf("rxq=%d,%d,%d ", portid, queueid, socketid);

			rte_eth_dev_info_get(portid, &dev_info);
			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = conf->rxmode.offloads;
			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					pktmbuf_pool[socketid]);
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, port=%d\n",

	/* start all enabled ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
		ret = rte_eth_dev_start(portid);
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start: err=%d, port=%d\n",

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 */
			rte_eth_promiscuous_enable(portid);

	/* verify/install packet-type parsing on every RX queue */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
		qconf = &lcore_conf[lcore_id];
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			if (prepare_ptype_parser(portid, queueid) == 0)
				rte_exit(EXIT_FAILURE, "ptype check fails\n");

	check_all_ports_link_status(enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0) {

	/* teardown: stop and close every enabled port */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
		printf("Closing port %d...", portid);
		rte_eth_dev_stop(portid);
		rte_eth_dev_close(portid);