X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fl3fwd%2Fmain.c;h=84f171f18b3abd653d7ec4358139b57dc473178f;hb=c81e3f21d1ea05e5123278b15d9d5e1257b6ba99;hp=3ce1b6539bce585d33072e23c5515dd6c2d154ba;hpb=9d5ca5323910591f2cafbac5070fcb5b6d02d022;p=dpdk.git diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index 3ce1b6539b..84f171f18b 100644 --- a/examples/l3fwd/main.c +++ b/examples/l3fwd/main.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include @@ -48,11 +19,10 @@ #include #include #include +#include #include #include -#include #include -#include #include #include #include @@ -61,11 +31,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -78,12 +46,7 @@ #include #include "l3fwd.h" - -/* - * Configurable number of RX/TX ring descriptors - */ -#define RTE_TEST_RX_DESC_DEFAULT 128 -#define RTE_TEST_TX_DESC_DEFAULT 512 +#include "l3fwd_event.h" #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS #define MAX_RX_QUEUE_PER_PORT 128 @@ -101,17 +64,19 @@ static int promiscuous_on; static int l3fwd_lpm_on; static int l3fwd_em_on; +/* Global variables. */ + static int numa_on = 1; /**< NUMA is enabled by default. */ static int parse_ptype; /**< Parse packet type using rx callback, and */ /**< disabled by default */ - -/* Global variables. 
*/ +static int per_port_pool; /**< Use separate buffer pools per port; disabled */ + /**< by default */ volatile bool force_quit; /* ethernet addresses of ports */ uint64_t dest_eth_addr[RTE_MAX_ETHPORTS]; -struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; +struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; xmm_t val_eth[RTE_MAX_ETHPORTS]; @@ -125,7 +90,7 @@ uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT; struct lcore_conf lcore_conf[RTE_MAX_LCORE]; struct lcore_params { - uint8_t port_id; + uint16_t port_id; uint8_t queue_id; uint8_t lcore_id; } __rte_cache_aligned; @@ -150,13 +115,9 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) / static struct rte_eth_conf port_conf = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS, - .max_rx_pkt_len = ETHER_MAX_LEN, + .max_rx_pkt_len = RTE_ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 0, /**< CRC stripped by hardware */ + .offloads = DEV_RX_OFFLOAD_CHECKSUM, }, .rx_adv_conf = { .rss_conf = { @@ -169,7 +130,8 @@ static struct rte_eth_conf port_conf = { }, }; -static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; +static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS]; +static uint8_t lkp_per_socket[NB_SOCKETS]; struct l3fwd_lkp_mode { void (*setup)(int); @@ -244,9 +206,9 @@ check_lcore_params(void) } static int -check_port_config(const unsigned nb_ports) +check_port_config(void) { - unsigned portid; + uint16_t portid; uint16_t i; for (i = 0; i < nb_lcore_params; ++i) { @@ -255,7 +217,7 @@ check_port_config(const unsigned nb_ports) printf("port %u is not enabled in port mask\n", portid); return -1; } - if (portid >= nb_ports) { + if (!rte_eth_dev_is_valid_port(portid)) { printf("port %u is not present on the board\n", portid); return -1; } @@ -264,7 +226,7 @@ check_port_config(const unsigned nb_ports) } static uint8_t -get_port_n_rx_queues(const uint8_t port) +get_port_n_rx_queues(const uint16_t port) { int queue = -1; uint16_t i; @@ -310,7 +272,7 @@ init_lcore_rx_queues(void) static void print_usage(const char *prgname) { - printf("%s [EAL options] --" + fprintf(stderr, "%s [EAL options] --" " -p PORTMASK" " [-P]" " [-E]" @@ -321,7 +283,10 @@ print_usage(const char *prgname) " [--no-numa]" " [--hash-entry-num]" " [--ipv6]" - " [--parse-ptype]\n\n" + " [--parse-ptype]" + " [--per-port-pool]" + " [--mode]" + " [--eventq-sched]\n\n" " -p PORTMASK: Hexadecimal bitmask of ports to configure\n" " -P : Enable promiscuous mode\n" @@ -335,7 +300,17 @@ print_usage(const char *prgname) " --no-numa: Disable numa awareness\n" " --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n" " --ipv6: Set if running ipv6 packets\n" - " --parse-ptype: Set to use software to analyze packet type\n\n", + " --parse-ptype: Set to use software to analyze packet type\n" + " --per-port-pool: Use separate buffer pool per port\n" + " --mode: Packet transfer mode for I/O, poll or eventdev\n" + " Default mode = poll\n" + " --eventq-sched: Event queue synchronization method\n" + " ordered, atomic or parallel.\n" + " Default: atomic\n" + " Valid only if --mode=eventdev\n" + " --event-eth-rxqs: Number of ethernet RX queues per device.\n" + " Default: 1\n" + " Valid only if --mode=eventdev\n\n", prgname); } @@ -446,7 +421,7 @@ parse_config(const char *q_arg) static void parse_eth_dest(const char 
*optarg) { - uint8_t portid; + uint16_t portid; char *port_end; uint8_t c, *dest, peer_addr[6]; @@ -471,8 +446,49 @@ parse_eth_dest(const char *optarg) *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid]; } +static void +parse_mode(const char *optarg) +{ + struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); + + if (!strcmp(optarg, "poll")) + evt_rsrc->enabled = false; + else if (!strcmp(optarg, "eventdev")) + evt_rsrc->enabled = true; +} + +static void +parse_eventq_sched(const char *optarg) +{ + struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); + + if (!strcmp(optarg, "ordered")) + evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED; + if (!strcmp(optarg, "atomic")) + evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC; + if (!strcmp(optarg, "parallel")) + evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL; +} + +static void +parse_event_eth_rx_queues(const char *eth_rx_queues) +{ + struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); + char *end = NULL; + uint8_t num_eth_rx_queues; + + /* parse decimal string */ + num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10); + if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0')) + return; + + if (num_eth_rx_queues == 0) + return; + + evt_rsrc->eth_rx_queues = num_eth_rx_queues; +} + #define MAX_JUMBO_PKT_LEN 9600 -#define MEMPOOL_CACHE_SIZE 256 static const char short_options[] = "p:" /* portmask */ @@ -488,6 +504,10 @@ static const char short_options[] = #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo" #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num" #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype" +#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool" +#define CMD_LINE_OPT_MODE "mode" +#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched" +#define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs" enum { /* long options mapped to a short option */ @@ -501,6 +521,10 @@ enum { CMD_LINE_OPT_ENABLE_JUMBO_NUM, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM, CMD_LINE_OPT_PARSE_PTYPE_NUM, + CMD_LINE_OPT_PARSE_PER_PORT_POOL, + CMD_LINE_OPT_MODE_NUM, + CMD_LINE_OPT_EVENTQ_SYNC_NUM, + CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM, }; static const struct option lgopts[] = { @@ -511,6 +535,11 @@ static const struct option lgopts[] = { {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM}, {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM}, {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM}, + {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL}, + {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM}, + {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM}, + {CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0, + CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM}, {NULL, 0, 0, 0} }; @@ -521,11 +550,11 @@ static const struct option lgopts[] = { * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum * value of 8192 */ -#define NB_MBUF RTE_MAX( \ - (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \ - nb_ports*nb_lcores*MAX_PKT_BURST + \ - nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \ - nb_lcores*MEMPOOL_CACHE_SIZE), \ +#define NB_MBUF(nports) RTE_MAX( \ + (nports*nb_rx_queue*nb_rxd + \ + nports*nb_lcores*MAX_PKT_BURST + \ + nports*n_tx_queue*nb_txd + \ + nb_lcores*MEMPOOL_CACHE_SIZE), \ (unsigned)8192) /* Parse the argument given in the command line of the application */ @@ -536,26 +565,14 @@ parse_args(int argc, char **argv) char **argvopt; int option_index; char *prgname = argv[0]; + uint8_t lcore_params = 0; + uint8_t eventq_sched = 0; + uint8_t eth_rx_q = 0; + struct 
l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); argvopt = argv; /* Error or normal output strings. */ - const char *str1 = "L3FWD: Invalid portmask"; - const char *str2 = "L3FWD: Promiscuous mode selected"; - const char *str3 = "L3FWD: Exact match selected"; - const char *str4 = "L3FWD: Longest-prefix match selected"; - const char *str5 = "L3FWD: Invalid config"; - const char *str6 = "L3FWD: NUMA is disabled"; - const char *str7 = "L3FWD: IPV6 is specified"; - const char *str8 = - "L3FWD: Jumbo frame is enabled - disabling simple TX path"; - const char *str9 = "L3FWD: Invalid packet length"; - const char *str10 = "L3FWD: Set jumbo frame max packet len to "; - const char *str11 = "L3FWD: Invalid hash entry number"; - const char *str12 = - "L3FWD: LPM and EM are mutually exclusive, select only one"; - const char *str13 = "L3FWD: LPM or EM none selected, default LPM on"; - while ((opt = getopt_long(argc, argvopt, short_options, lgopts, &option_index)) != EOF) { @@ -564,24 +581,21 @@ parse_args(int argc, char **argv) case 'p': enabled_port_mask = parse_portmask(optarg); if (enabled_port_mask == 0) { - printf("%s\n", str1); + fprintf(stderr, "Invalid portmask\n"); print_usage(prgname); return -1; } break; case 'P': - printf("%s\n", str2); promiscuous_on = 1; break; case 'E': - printf("%s\n", str3); l3fwd_em_on = 1; break; case 'L': - printf("%s\n", str4); l3fwd_lpm_on = 1; break; @@ -589,10 +603,11 @@ parse_args(int argc, char **argv) case CMD_LINE_OPT_CONFIG_NUM: ret = parse_config(optarg); if (ret) { - printf("%s\n", str5); + fprintf(stderr, "Invalid config\n"); print_usage(prgname); return -1; } + lcore_params = 1; break; case CMD_LINE_OPT_ETH_DEST_NUM: @@ -600,40 +615,36 @@ parse_args(int argc, char **argv) break; case CMD_LINE_OPT_NO_NUMA_NUM: - printf("%s\n", str6); numa_on = 0; break; case CMD_LINE_OPT_IPV6_NUM: - printf("%sn", str7); ipv6 = 1; break; case CMD_LINE_OPT_ENABLE_JUMBO_NUM: { - struct option lenopts = { + const struct option lenopts = { "max-pkt-len", required_argument, 0, 0 }; - printf("%s\n", str8); - port_conf.rxmode.jumbo_frame = 1; + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; /* * if no max-pkt-len set, use the default - * value ETHER_MAX_LEN. + * value RTE_ETHER_MAX_LEN. 
*/ if (getopt_long(argc, argvopt, "", &lenopts, &option_index) == 0) { ret = parse_max_pkt_len(optarg); - if ((ret < 64) || - (ret > MAX_JUMBO_PKT_LEN)) { - printf("%s\n", str9); + if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) { + fprintf(stderr, + "invalid maximum packet length\n"); print_usage(prgname); return -1; } port_conf.rxmode.max_rx_pkt_len = ret; } - printf("%s %u\n", str10, - (unsigned int)port_conf.rxmode.max_rx_pkt_len); break; } @@ -642,7 +653,7 @@ parse_args(int argc, char **argv) if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) { hash_entry_number = ret; } else { - printf("%s\n", str11); + fprintf(stderr, "invalid hash entry number\n"); print_usage(prgname); return -1; } @@ -653,6 +664,25 @@ parse_args(int argc, char **argv) parse_ptype = 1; break; + case CMD_LINE_OPT_PARSE_PER_PORT_POOL: + printf("per port buffer pool is enabled\n"); + per_port_pool = 1; + break; + + case CMD_LINE_OPT_MODE_NUM: + parse_mode(optarg); + break; + + case CMD_LINE_OPT_EVENTQ_SYNC_NUM: + parse_eventq_sched(optarg); + eventq_sched = 1; + break; + + case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM: + parse_event_eth_rx_queues(optarg); + eth_rx_q = 1; + break; + default: print_usage(prgname); return -1; @@ -661,7 +691,22 @@ parse_args(int argc, char **argv) /* If both LPM and EM are selected, return error. */ if (l3fwd_lpm_on && l3fwd_em_on) { - printf("%s\n", str12); + fprintf(stderr, "LPM and EM are mutually exclusive, select only one\n"); + return -1; + } + + if (evt_rsrc->enabled && lcore_params) { + fprintf(stderr, "lcore config is not valid when event mode is selected\n"); + return -1; + } + + if (!evt_rsrc->enabled && eth_rx_q) { + fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n"); + return -1; + } + + if (!evt_rsrc->enabled && eventq_sched) { + fprintf(stderr, "eventq_sched is valid only when event mode is selected\n"); return -1; } @@ -670,8 +715,8 @@ parse_args(int argc, char **argv) * as default match. */ if (!l3fwd_lpm_on && !l3fwd_em_on) { + fprintf(stderr, "LPM or EM none selected, default LPM on\n"); l3fwd_lpm_on = 1; - printf("%s\n", str13); } /* @@ -693,15 +738,15 @@ parse_args(int argc, char **argv) } static void -print_ethaddr(const char *name, const struct ether_addr *eth_addr) +print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) { - char buf[ETHER_ADDR_FMT_SIZE]; - ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); printf("%s%s", name, buf); } -static int -init_mem(unsigned nb_mbuf) +int +init_mem(uint16_t portid, unsigned int nb_mbuf) { struct lcore_conf *qconf; int socketid; @@ -723,13 +768,14 @@ init_mem(unsigned nb_mbuf) socketid, lcore_id, NB_SOCKETS); } - if (pktmbuf_pool[socketid] == NULL) { - snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); - pktmbuf_pool[socketid] = + if (pktmbuf_pool[portid][socketid] == NULL) { + snprintf(s, sizeof(s), "mbuf_pool_%d:%d", + portid, socketid); + pktmbuf_pool[portid][socketid] = rte_pktmbuf_pool_create(s, nb_mbuf, MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, socketid); - if (pktmbuf_pool[socketid] == NULL) + if (pktmbuf_pool[portid][socketid] == NULL) rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", socketid); @@ -737,8 +783,13 @@ init_mem(unsigned nb_mbuf) printf("Allocated mbuf pool on socket %d\n", socketid); - /* Setup either LPM or EM(f.e Hash). */ - l3fwd_lkp.setup(socketid); + /* Setup either LPM or EM(f.e Hash). But, only once per + * available socket. 
+ */ + if (!lkp_per_socket[socketid]) { + l3fwd_lkp.setup(socketid); + lkp_per_socket[socketid] = 1; + } } qconf = &lcore_conf[lcore_id]; qconf->ipv4_lookup_struct = @@ -751,12 +802,14 @@ init_mem(unsigned nb_mbuf) /* Check the link status of all ports in up to 9s, and print them finally */ static void -check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) +check_all_ports_link_status(uint32_t port_mask) { #define CHECK_INTERVAL 100 /* 100ms */ #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ - uint8_t portid, count, all_ports_up, print_flag = 0; + uint16_t portid; + uint8_t count, all_ports_up, print_flag = 0; struct rte_eth_link link; + int ret; printf("\nChecking link status"); fflush(stdout); @@ -764,24 +817,30 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) if (force_quit) return; all_ports_up = 1; - for (portid = 0; portid < port_num; portid++) { + RTE_ETH_FOREACH_DEV(portid) { if (force_quit) return; if ((port_mask & (1 << portid)) == 0) continue; memset(&link, 0, sizeof(link)); - rte_eth_link_get_nowait(portid, &link); + ret = rte_eth_link_get_nowait(portid, &link); + if (ret < 0) { + all_ports_up = 0; + if (print_flag == 1) + printf("Port %u link get failed: %s\n", + portid, rte_strerror(-ret)); + continue; + } /* print link status if flag set */ if (print_flag == 1) { if (link.link_status) - printf("Port %d Link Up - speed %u " - "Mbps - %s\n", (uint8_t)portid, - (unsigned)link.link_speed, + printf( + "Port%d Link Up. Speed %u Mbps -%s\n", + portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? - ("full-duplex") : ("half-duplex\n")); + ("full-duplex") : ("half-duplex")); else - printf("Port %d Link Down\n", - (uint8_t)portid); + printf("Port %d Link Down\n", portid); continue; } /* clear all_ports_up flag if any link down */ @@ -819,7 +878,7 @@ signal_handler(int signum) } static int -prepare_ptype_parser(uint8_t portid, uint16_t queueid) +prepare_ptype_parser(uint16_t portid, uint16_t queueid) { if (parse_ptype) { printf("Port %d: softly parse packet type info\n", portid); @@ -840,41 +899,18 @@ prepare_ptype_parser(uint8_t portid, uint16_t queueid) return 0; } -int -main(int argc, char **argv) +static void +l3fwd_poll_resource_setup(void) { - struct lcore_conf *qconf; + uint8_t nb_rx_queue, queue, socketid; struct rte_eth_dev_info dev_info; + uint32_t n_tx_queue, nb_lcores; struct rte_eth_txconf *txconf; + struct lcore_conf *qconf; + uint16_t queueid, portid; + unsigned int nb_ports; + unsigned int lcore_id; int ret; - unsigned nb_ports; - uint16_t queueid; - unsigned lcore_id; - uint32_t n_tx_queue, nb_lcores; - uint8_t portid, nb_rx_queue, queue, socketid; - - /* init EAL */ - ret = rte_eal_init(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); - argc -= ret; - argv += ret; - - force_quit = false; - signal(SIGINT, signal_handler); - signal(SIGTERM, signal_handler); - - /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */ - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - dest_eth_addr[portid] = - ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40); - *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid]; - } - - /* parse application arguments (after the EAL ones) */ - ret = parse_args(argc, argv); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n"); if (check_lcore_params() < 0) rte_exit(EXIT_FAILURE, "check_lcore_params failed\n"); @@ -883,18 +919,17 @@ main(int argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); - 
nb_ports = rte_eth_dev_count(); + nb_ports = rte_eth_dev_count_avail(); - if (check_port_config(nb_ports) < 0) + if (check_port_config() < 0) rte_exit(EXIT_FAILURE, "check_port_config failed\n"); nb_lcores = rte_lcore_count(); - /* Setup function pointers for lookup method. */ - setup_l3fwd_lookup_tables(); - /* initialize all ports */ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { + struct rte_eth_conf local_port_conf = port_conf; + /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { printf("\nSkipping disabled port %d\n", portid); @@ -911,28 +946,69 @@ main(int argc, char **argv) n_tx_queue = MAX_TX_QUEUE_PER_PORT; printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, (unsigned)n_tx_queue ); + + ret = rte_eth_dev_info_get(portid, &dev_info); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Error during getting device (port %u) info: %s\n", + portid, strerror(-ret)); + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + local_port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + printf("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + portid, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } + ret = rte_eth_dev_configure(portid, nb_rx_queue, - (uint16_t)n_tx_queue, &port_conf); + (uint16_t)n_tx_queue, &local_port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", ret, portid); - rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, + &nb_txd); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Cannot adjust number of descriptors: err=%d, " + "port=%d\n", ret, portid); + + ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Cannot get MAC address: err=%d, port=%d\n", + ret, portid); + print_ethaddr(" Address:", &ports_eth_addr[portid]); printf(", "); print_ethaddr("Destination:", - (const struct ether_addr *)&dest_eth_addr[portid]); + (const struct rte_ether_addr *)&dest_eth_addr[portid]); printf(", "); /* * prepare src MACs for each port. */ - ether_addr_copy(&ports_eth_addr[portid], - (struct ether_addr *)(val_eth + portid) + 1); + rte_ether_addr_copy(&ports_eth_addr[portid], + (struct rte_ether_addr *)(val_eth + portid) + 1); /* init memory */ - ret = init_mem(NB_MBUF); + if (!per_port_pool) { + /* portid = 0; this is *not* signifying the first port, + * rather, it signifies that portid is ignored. 
+ */ + ret = init_mem(0, NB_MBUF(nb_ports)); + } else { + ret = init_mem(portid, NB_MBUF(1)); + } if (ret < 0) rte_exit(EXIT_FAILURE, "init_mem failed\n"); @@ -951,10 +1027,8 @@ main(int argc, char **argv) printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); fflush(stdout); - rte_eth_dev_info_get(portid, &dev_info); txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; + txconf->offloads = local_port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socketid, txconf); if (ret < 0) @@ -980,6 +1054,8 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for(queue = 0; queue < qconf->n_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; @@ -992,21 +1068,172 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); - ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, - socketid, - NULL, - pktmbuf_pool[socketid]); + ret = rte_eth_dev_info_get(portid, &dev_info); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Error during getting device (port %u) info: %s\n", + portid, strerror(-ret)); + + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; + if (!per_port_pool) + ret = rte_eth_rx_queue_setup(portid, queueid, + nb_rxd, socketid, + &rxq_conf, + pktmbuf_pool[0][socketid]); + else + ret = rte_eth_rx_queue_setup(portid, queueid, + nb_rxd, socketid, + &rxq_conf, + pktmbuf_pool[portid][socketid]); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%d\n", ret, portid); } } +} - printf("\n"); +static inline int +l3fwd_service_enable(uint32_t service_id) +{ + uint8_t min_service_count = UINT8_MAX; + uint32_t slcore_array[RTE_MAX_LCORE]; + unsigned int slcore = 0; + uint8_t service_count; + int32_t slcore_count; + + if (!rte_service_lcore_count()) + return -ENOENT; + + slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE); + if (slcore_count < 0) + return -ENOENT; + /* Get the core which has least number of services running. 
*/ + while (slcore_count--) { + /* Reset default mapping */ + rte_service_map_lcore_set(service_id, + slcore_array[slcore_count], 0); + service_count = rte_service_lcore_count_services( + slcore_array[slcore_count]); + if (service_count < min_service_count) { + slcore = slcore_array[slcore_count]; + min_service_count = service_count; + } + } + if (rte_service_map_lcore_set(service_id, slcore, 1)) + return -ENOENT; + rte_service_lcore_start(slcore); + + return 0; +} + +static void +l3fwd_event_service_setup(void) +{ + struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); + struct rte_event_dev_info evdev_info; + uint32_t service_id, caps; + int ret, i; + + rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info); + if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) { + ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id, + &service_id); + if (ret != -ESRCH && ret != 0) + rte_exit(EXIT_FAILURE, + "Error in starting eventdev service\n"); + l3fwd_service_enable(service_id); + } + + for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) { + ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id, + evt_rsrc->rx_adptr.rx_adptr[i], &caps); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Failed to get Rx adapter[%d] caps\n", + evt_rsrc->rx_adptr.rx_adptr[i]); + ret = rte_event_eth_rx_adapter_service_id_get( + evt_rsrc->event_d_id, + &service_id); + if (ret != -ESRCH && ret != 0) + rte_exit(EXIT_FAILURE, + "Error in starting Rx adapter[%d] service\n", + evt_rsrc->rx_adptr.rx_adptr[i]); + l3fwd_service_enable(service_id); + } + + for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) { + ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id, + evt_rsrc->tx_adptr.tx_adptr[i], &caps); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Failed to get Rx adapter[%d] caps\n", + evt_rsrc->tx_adptr.tx_adptr[i]); + ret = rte_event_eth_tx_adapter_service_id_get( + evt_rsrc->event_d_id, + &service_id); + if (ret != -ESRCH && ret != 0) + rte_exit(EXIT_FAILURE, + "Error in starting Rx adapter[%d] service\n", + evt_rsrc->tx_adptr.tx_adptr[i]); + l3fwd_service_enable(service_id); + } +} + +int +main(int argc, char **argv) +{ + struct l3fwd_event_resources *evt_rsrc; + struct lcore_conf *qconf; + uint16_t queueid, portid; + unsigned int lcore_id; + uint8_t queue; + int i, ret; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + force_quit = false; + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */ + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { + dest_eth_addr[portid] = + RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40); + *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid]; + } + + evt_rsrc = l3fwd_get_eventdev_rsrc(); + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n"); + + /* Setup function pointers for lookup method. 
*/ + setup_l3fwd_lookup_tables(); + + evt_rsrc->per_port_pool = per_port_pool; + evt_rsrc->pkt_pool = pktmbuf_pool; + evt_rsrc->port_mask = enabled_port_mask; + /* Configure eventdev parameters if user has requested */ + if (evt_rsrc->enabled) { + l3fwd_event_resource_setup(&port_conf); + if (l3fwd_em_on) + l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop; + else + l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop; + l3fwd_event_service_setup(); + } else + l3fwd_poll_resource_setup(); /* start ports */ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { if ((enabled_port_mask & (1 << portid)) == 0) { continue; } @@ -1023,8 +1250,13 @@ main(int argc, char **argv) * to itself through 2 cross-connected ports of the * target machine. */ - if (promiscuous_on) - rte_eth_promiscuous_enable(portid); + if (promiscuous_on) { + ret = rte_eth_promiscuous_enable(portid); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "rte_eth_promiscuous_enable: err=%s, port=%u\n", + rte_strerror(-ret), portid); + } } printf("\n"); @@ -1041,27 +1273,46 @@ main(int argc, char **argv) } } - - check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask); + check_all_ports_link_status(enabled_port_mask); ret = 0; /* launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER); - RTE_LCORE_FOREACH_SLAVE(lcore_id) { - if (rte_eal_wait_lcore(lcore_id) < 0) { - ret = -1; - break; + if (evt_rsrc->enabled) { + for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) + rte_event_eth_rx_adapter_stop( + evt_rsrc->rx_adptr.rx_adptr[i]); + for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) + rte_event_eth_tx_adapter_stop( + evt_rsrc->tx_adptr.tx_adptr[i]); + + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + rte_eth_dev_stop(portid); } - } - /* stop ports */ - for (portid = 0; portid < nb_ports; portid++) { - if ((enabled_port_mask & (1 << portid)) == 0) - continue; - printf("Closing port %d...", portid); - rte_eth_dev_stop(portid); - rte_eth_dev_close(portid); - printf(" Done\n"); + rte_eal_mp_wait_lcore(); + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + rte_eth_dev_close(portid); + } + + rte_event_dev_stop(evt_rsrc->event_d_id); + rte_event_dev_close(evt_rsrc->event_d_id); + + } else { + rte_eal_mp_wait_lcore(); + + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + printf("Closing port %d...", portid); + rte_eth_dev_stop(portid); + rte_eth_dev_close(portid); + printf(" Done\n"); + } } printf("Bye...\n");
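
For readers following the --per-port-pool change above: the patch sizes each mbuf pool with NB_MBUF(nports) and creates it through the standard rte_pktmbuf_pool_create() API, keyed by pktmbuf_pool[portid][socketid] and named "mbuf_pool_<port>:<socket>". The standalone sketch below is not part of the patch; the single port, single RX/TX queue, the 1024-descriptor figures and the bare main() are illustrative assumptions, and the NB_MBUF macro is re-parameterized here only so the sketch compiles on its own. It shows that sizing and naming pattern in isolation:

/* Illustrative sketch (not part of the patch): how the per-port pool
 * sizing and creation fit together when --per-port-pool is given.
 * Port, socket, queue and descriptor counts are placeholder values.
 */
#include <stdio.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 256

/* Same shape as the macro in the patch: worst-case mbufs in flight
 * (RX rings + per-lcore bursts + TX rings + per-lcore caches),
 * never less than 8192. */
#define NB_MBUF(nports, nb_rx_queue, nb_rxd, nb_lcores, n_tx_queue, nb_txd) \
	RTE_MAX((nports * nb_rx_queue * nb_rxd +			\
		 nports * nb_lcores * MAX_PKT_BURST +			\
		 nports * n_tx_queue * nb_txd +				\
		 nb_lcores * MEMPOOL_CACHE_SIZE),			\
		(unsigned int)8192)

int
main(int argc, char **argv)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *pool;
	uint16_t portid = 0;	/* placeholder port */
	int socketid = 0;	/* placeholder NUMA socket */
	unsigned int nb_mbuf;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* One RX and one TX queue with 1024 descriptors each - assumed. */
	nb_mbuf = NB_MBUF(1, 1, 1024, rte_lcore_count(), 1, 1024);

	/* Per-port pool name mirrors the patch's "mbuf_pool_%d:%d". */
	snprintf(name, sizeof(name), "mbuf_pool_%d:%d", portid, socketid);
	pool = rte_pktmbuf_pool_create(name, nb_mbuf, MEMPOOL_CACHE_SIZE,
				       0, RTE_MBUF_DEFAULT_BUF_SIZE,
				       socketid);
	if (pool == NULL)
		return -1;

	printf("created %s with %u mbufs\n", name, nb_mbuf);
	return 0;
}

With --per-port-pool each port draws mbufs from its own pool on its local socket, keeping buffer accounting per port instead of sharing one pool among all ports; without the option the application keeps the previous behaviour of one pool per socket (the portid argument to init_mem() is fixed to 0 and ignored, as the patch's comment notes).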