X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Fconfig.c;h=d6caa1f0b27222206ea7e2d72f0899ca6ba8de81;hb=68629be3a622ee53cd5b40c8447ae9b083ff3f6c;hp=fe31a9d52ff5ee8914531242366f1734c7ae5178;hpb=2f622174bf863563ed17aa9d298978bf88af75ce;p=dpdk.git diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index fe31a9d52f..d6caa1f0b2 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -38,20 +37,24 @@ #include #include #include +#include #include -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE #include #endif -#ifdef RTE_LIBRTE_I40E_PMD +#ifdef RTE_NET_I40E #include #endif -#ifdef RTE_LIBRTE_BNXT_PMD +#ifdef RTE_NET_BNXT #include #endif +#ifdef RTE_LIB_GRO #include +#endif #include #include "testpmd.h" +#include "cmdline_mtr.h" #define ETHDEV_FWVERS_LEN 32 @@ -84,58 +87,63 @@ static const struct { }; const struct rss_type_info rss_type_table[] = { - { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | - ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | - ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP | - ETH_RSS_GTPU}, + { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | + RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | + RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | + RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2}, { "none", 0 }, - { "eth", ETH_RSS_ETH }, - { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, - { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, - { "vlan", ETH_RSS_VLAN }, - { "s-vlan", ETH_RSS_S_VLAN }, - { "c-vlan", ETH_RSS_C_VLAN }, - { "ipv4", ETH_RSS_IPV4 }, - { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, - { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, - { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, - { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, - { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, - { "ipv6", ETH_RSS_IPV6 }, - { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, - { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, - { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, - { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, - { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, - { "l2-payload", ETH_RSS_L2_PAYLOAD }, - { "ipv6-ex", ETH_RSS_IPV6_EX }, - { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, - { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, - { "port", ETH_RSS_PORT }, - { "vxlan", ETH_RSS_VXLAN }, - { "geneve", ETH_RSS_GENEVE }, - { "nvgre", ETH_RSS_NVGRE }, - { "ip", ETH_RSS_IP }, - { "udp", ETH_RSS_UDP }, - { "tcp", ETH_RSS_TCP }, - { "sctp", ETH_RSS_SCTP }, - { "tunnel", ETH_RSS_TUNNEL }, + { "eth", RTE_ETH_RSS_ETH }, + { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, + { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, + { "vlan", RTE_ETH_RSS_VLAN }, + { "s-vlan", RTE_ETH_RSS_S_VLAN }, + { "c-vlan", RTE_ETH_RSS_C_VLAN }, + { "ipv4", RTE_ETH_RSS_IPV4 }, + { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, + { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, + { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, + { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, + { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, + { "ipv6", RTE_ETH_RSS_IPV6 }, + { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, + { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, + { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, + { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, + { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, + { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, + { "ipv6-ex", RTE_ETH_RSS_IPV6_EX }, + { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, + { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, 
+ { "port", RTE_ETH_RSS_PORT }, + { "vxlan", RTE_ETH_RSS_VXLAN }, + { "geneve", RTE_ETH_RSS_GENEVE }, + { "nvgre", RTE_ETH_RSS_NVGRE }, + { "ip", RTE_ETH_RSS_IP }, + { "udp", RTE_ETH_RSS_UDP }, + { "tcp", RTE_ETH_RSS_TCP }, + { "sctp", RTE_ETH_RSS_SCTP }, + { "tunnel", RTE_ETH_RSS_TUNNEL }, { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, - { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, - { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, - { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, - { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, - { "esp", ETH_RSS_ESP }, - { "ah", ETH_RSS_AH }, - { "l2tpv3", ETH_RSS_L2TPV3 }, - { "pfcp", ETH_RSS_PFCP }, - { "pppoe", ETH_RSS_PPPOE }, - { "gtpu", ETH_RSS_GTPU }, + { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, + { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, + { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, + { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, + { "esp", RTE_ETH_RSS_ESP }, + { "ah", RTE_ETH_RSS_AH }, + { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, + { "pfcp", RTE_ETH_RSS_PFCP }, + { "pppoe", RTE_ETH_RSS_PPPOE }, + { "gtpu", RTE_ETH_RSS_GTPU }, + { "ecpri", RTE_ETH_RSS_ECPRI }, + { "mpls", RTE_ETH_RSS_MPLS }, + { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, + { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, + { "l2tpv2", RTE_ETH_RSS_L2TPV2 }, { NULL, 0 }, }; @@ -169,6 +177,65 @@ print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) printf("%s%s", name, buf); } +static void +nic_xstats_display_periodic(portid_t port_id) +{ + struct xstat_display_info *xstats_info; + uint64_t *prev_values, *curr_values; + uint64_t diff_value, value_rate; + struct timespec cur_time; + uint64_t *ids_supp; + size_t ids_supp_sz; + uint64_t diff_ns; + unsigned int i; + int rc; + + xstats_info = &ports[port_id].xstats_info; + + ids_supp_sz = xstats_info->ids_supp_sz; + if (ids_supp_sz == 0) + return; + + printf("\n"); + + ids_supp = xstats_info->ids_supp; + prev_values = xstats_info->prev_values; + curr_values = xstats_info->curr_values; + + rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, + ids_supp_sz); + if (rc != (int)ids_supp_sz) { + fprintf(stderr, + "Failed to get values of %zu xstats for port %u - return code %d\n", + ids_supp_sz, port_id, rc); + return; + } + + diff_ns = 0; + if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { + uint64_t ns; + + ns = cur_time.tv_sec * NS_PER_SEC; + ns += cur_time.tv_nsec; + + if (xstats_info->prev_ns != 0) + diff_ns = ns - xstats_info->prev_ns; + xstats_info->prev_ns = ns; + } + + printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); + for (i = 0; i < ids_supp_sz; i++) { + diff_value = (curr_values[i] > prev_values[i]) ? + (curr_values[i] - prev_values[i]) : 0; + prev_values[i] = curr_values[i]; + value_rate = diff_ns > 0 ? 
+ (double)diff_value / diff_ns * NS_PER_SEC : 0; + + printf(" %-25s%12"PRIu64" %15"PRIu64"\n", + xstats_display[i].name, curr_values[i], value_rate); + } +} + void nic_stats_display(portid_t port_id) { @@ -182,59 +249,29 @@ nic_stats_display(portid_t port_id) diff_ns; uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; struct rte_eth_stats stats; - struct rte_port *port = &ports[port_id]; - uint8_t i; - static const char *nic_stats_border = "########################"; + int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) { print_valid_ports(); return; } - rte_eth_stats_get(port_id, &stats); + ret = rte_eth_stats_get(port_id, &stats); + if (ret != 0) { + fprintf(stderr, + "%s: Error: failed to get stats (port %u): %d", + __func__, port_id, ret); + return; + } printf("\n %s NIC statistics for port %-2d %s\n", nic_stats_border, port_id, nic_stats_border); - if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { - printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " - "%-"PRIu64"\n", - stats.ipackets, stats.imissed, stats.ibytes); - printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); - printf(" RX-nombuf: %-10"PRIu64"\n", - stats.rx_nombuf); - printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " - "%-"PRIu64"\n", - stats.opackets, stats.oerrors, stats.obytes); - } - else { - printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 - " RX-bytes: %10"PRIu64"\n", - stats.ipackets, stats.ierrors, stats.ibytes); - printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); - printf(" RX-nombuf: %10"PRIu64"\n", - stats.rx_nombuf); - printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 - " TX-bytes: %10"PRIu64"\n", - stats.opackets, stats.oerrors, stats.obytes); - } - - if (port->rx_queue_stats_mapping_enabled) { - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d RX-packets: %10"PRIu64 - " RX-errors: %10"PRIu64 - " RX-bytes: %10"PRIu64"\n", - i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); - } - } - if (port->tx_queue_stats_mapping_enabled) { - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d TX-packets: %10"PRIu64 - " TX-bytes: %10"PRIu64"\n", - i, stats.q_opackets[i], stats.q_obytes[i]); - } - } + printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " + "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); + printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); + printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); + printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " + "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); diff_ns = 0; if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { @@ -275,6 +312,9 @@ nic_stats_display(portid_t port_id) PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, mpps_tx, mbps_tx * 8); + if (xstats_display_num > 0) + nic_xstats_display_periodic(port_id); + printf(" %s############################%s\n", nic_stats_border, nic_stats_border); } @@ -291,8 +331,9 @@ nic_stats_clear(portid_t port_id) ret = rte_eth_stats_reset(port_id); if (ret != 0) { - printf("%s: Error: failed to reset stats (port %u): %s", - __func__, port_id, strerror(-ret)); + fprintf(stderr, + "%s: Error: failed to reset stats (port %u): %s", + __func__, port_id, strerror(-ret)); return; } @@ -300,8 +341,9 @@ nic_stats_clear(portid_t port_id) if (ret != 0) { if (ret < 0) ret = -ret; - printf("%s: Error: failed to get stats (port %u): %s", - __func__, port_id, strerror(ret)); + fprintf(stderr, + 
"%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); return; } printf("\n NIC statistics for port %d cleared\n", port_id); @@ -320,26 +362,26 @@ nic_xstats_display(portid_t port_id) } printf("###### NIC extended statistics for port %-2d\n", port_id); if (!rte_eth_dev_is_valid_port(port_id)) { - printf("Error: Invalid port number %i\n", port_id); + fprintf(stderr, "Error: Invalid port number %i\n", port_id); return; } /* Get count */ cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); if (cnt_xstats < 0) { - printf("Error: Cannot get count of xstats\n"); + fprintf(stderr, "Error: Cannot get count of xstats\n"); return; } /* Get id-name lookup table */ xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); if (xstats_names == NULL) { - printf("Cannot allocate memory for xstats lookup\n"); + fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); return; } if (cnt_xstats != rte_eth_xstats_get_names( port_id, xstats_names, cnt_xstats)) { - printf("Error: Cannot get xstats lookup\n"); + fprintf(stderr, "Error: Cannot get xstats lookup\n"); free(xstats_names); return; } @@ -347,12 +389,12 @@ nic_xstats_display(portid_t port_id) /* Get stats themselves */ xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); if (xstats == NULL) { - printf("Cannot allocate memory for xstats\n"); + fprintf(stderr, "Cannot allocate memory for xstats\n"); free(xstats_names); return; } if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { - printf("Error: Unable to get xstats\n"); + fprintf(stderr, "Error: Unable to get xstats\n"); free(xstats_names); free(xstats); return; @@ -382,8 +424,9 @@ nic_xstats_clear(portid_t port_id) ret = rte_eth_xstats_reset(port_id); if (ret != 0) { - printf("%s: Error: failed to reset xstats (port %u): %s", - __func__, port_id, strerror(-ret)); + fprintf(stderr, + "%s: Error: failed to reset xstats (port %u): %s\n", + __func__, port_id, strerror(-ret)); return; } @@ -391,58 +434,23 @@ nic_xstats_clear(portid_t port_id) if (ret != 0) { if (ret < 0) ret = -ret; - printf("%s: Error: failed to get stats (port %u): %s", - __func__, port_id, strerror(ret)); + fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); return; } } -void -nic_stats_mapping_display(portid_t port_id) -{ - struct rte_port *port = &ports[port_id]; - uint16_t i; - - static const char *nic_stats_mapping_border = "########################"; - - if (port_id_is_invalid(port_id, ENABLED_WARN)) { - print_valid_ports(); - return; - } - - if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { - printf("Port id %d - either does not support queue statistic mapping or" - " no queue statistic mapping set\n", port_id); - return; - } - - printf("\n %s NIC statistics mapping for port %-2d %s\n", - nic_stats_mapping_border, port_id, nic_stats_mapping_border); - - if (port->rx_queue_stats_mapping_enabled) { - for (i = 0; i < nb_rx_queue_stats_mappings; i++) { - if (rx_queue_stats_mappings[i].port_id == port_id) { - printf(" RX-queue %2d mapped to Stats Reg %2d\n", - rx_queue_stats_mappings[i].queue_id, - rx_queue_stats_mappings[i].stats_counter_id); - } - } - printf("\n"); - } - - - if (port->tx_queue_stats_mapping_enabled) { - for (i = 0; i < nb_tx_queue_stats_mappings; i++) { - if (tx_queue_stats_mappings[i].port_id == port_id) { - printf(" TX-queue %2d mapped to Stats Reg %2d\n", - tx_queue_stats_mappings[i].queue_id, - tx_queue_stats_mappings[i].stats_counter_id); - } - } - } 
- - printf(" %s####################################%s\n", - nic_stats_mapping_border, nic_stats_mapping_border); +static const char * +get_queue_state_name(uint8_t queue_state) +{ + if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) + return "stopped"; + else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) + return "started"; + else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) + return "hairpin"; + else + return "unknown"; } void @@ -455,8 +463,8 @@ rx_queue_infos_display(portid_t port_id, uint16_t queue_id) rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); if (rc != 0) { - printf("Failed to retrieve information for port: %u, " - "RX queue: %hu\nerror desc: %s(%d)\n", + fprintf(stderr, + "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", port_id, queue_id, strerror(-rc), rc); return; } @@ -475,6 +483,7 @@ rx_queue_infos_display(portid_t port_id, uint16_t queue_id) (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); printf("\nRX scattered packets: %s", (qinfo.scattered_rx != 0) ? "on" : "off"); + printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); if (qinfo.rx_buf_size != 0) printf("\nRX buffer size: %hu", qinfo.rx_buf_size); printf("\nNumber of RXDs: %hu", qinfo.nb_desc); @@ -498,8 +507,8 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id) rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); if (rc != 0) { - printf("Failed to retrieve information for port: %u, " - "TX queue: %hu\nerror desc: %s(%d)\n", + fprintf(stderr, + "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", port_id, queue_id, strerror(-rc), rc); return; } @@ -515,6 +524,7 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id) printf("\nTX deferred start: %s", (qinfo.conf.tx_deferred_start != 0) ? 
"on" : "off"); printf("\nNumber of TXDs: %hu", qinfo.nb_desc); + printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) printf("\nBurst mode: %s%s", @@ -536,39 +546,39 @@ static void device_infos_display_speeds(uint32_t speed_capa) { printf("\n\tDevice speed capability:"); - if (speed_capa == ETH_LINK_SPEED_AUTONEG) + if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) printf(" Autonegotiate (all speeds)"); - if (speed_capa & ETH_LINK_SPEED_FIXED) + if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) printf(" Disable autonegotiate (fixed speed) "); - if (speed_capa & ETH_LINK_SPEED_10M_HD) + if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) printf(" 10 Mbps half-duplex "); - if (speed_capa & ETH_LINK_SPEED_10M) + if (speed_capa & RTE_ETH_LINK_SPEED_10M) printf(" 10 Mbps full-duplex "); - if (speed_capa & ETH_LINK_SPEED_100M_HD) + if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) printf(" 100 Mbps half-duplex "); - if (speed_capa & ETH_LINK_SPEED_100M) + if (speed_capa & RTE_ETH_LINK_SPEED_100M) printf(" 100 Mbps full-duplex "); - if (speed_capa & ETH_LINK_SPEED_1G) + if (speed_capa & RTE_ETH_LINK_SPEED_1G) printf(" 1 Gbps "); - if (speed_capa & ETH_LINK_SPEED_2_5G) + if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) printf(" 2.5 Gbps "); - if (speed_capa & ETH_LINK_SPEED_5G) + if (speed_capa & RTE_ETH_LINK_SPEED_5G) printf(" 5 Gbps "); - if (speed_capa & ETH_LINK_SPEED_10G) + if (speed_capa & RTE_ETH_LINK_SPEED_10G) printf(" 10 Gbps "); - if (speed_capa & ETH_LINK_SPEED_20G) + if (speed_capa & RTE_ETH_LINK_SPEED_20G) printf(" 20 Gbps "); - if (speed_capa & ETH_LINK_SPEED_25G) + if (speed_capa & RTE_ETH_LINK_SPEED_25G) printf(" 25 Gbps "); - if (speed_capa & ETH_LINK_SPEED_40G) + if (speed_capa & RTE_ETH_LINK_SPEED_40G) printf(" 40 Gbps "); - if (speed_capa & ETH_LINK_SPEED_50G) + if (speed_capa & RTE_ETH_LINK_SPEED_50G) printf(" 50 Gbps "); - if (speed_capa & ETH_LINK_SPEED_56G) + if (speed_capa & RTE_ETH_LINK_SPEED_56G) printf(" 56 Gbps "); - if (speed_capa & ETH_LINK_SPEED_100G) + if (speed_capa & RTE_ETH_LINK_SPEED_100G) printf(" 100 Gbps "); - if (speed_capa & ETH_LINK_SPEED_200G) + if (speed_capa & RTE_ETH_LINK_SPEED_200G) printf(" 200 Gbps "); } @@ -591,9 +601,7 @@ device_infos_display(const char *identifier) goto skip_parse; if (rte_devargs_parsef(&da, "%s", identifier)) { - printf("cannot parse identifier\n"); - if (da.args) - free(da.args); + fprintf(stderr, "cannot parse identifier\n"); return; } @@ -641,6 +649,30 @@ skip_parse: } } }; + rte_devargs_reset(&da); +} + +static void +print_dev_capabilities(uint64_t capabilities) +{ + uint64_t single_capa; + int begin; + int end; + int bit; + + if (capabilities == 0) + return; + + begin = __builtin_ctzll(capabilities); + end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); + + single_capa = 1ULL << begin; + for (bit = begin; bit < end; bit++) { + if (capabilities & single_capa) + printf(" %s", + rte_eth_dev_capability_name(single_capa)); + single_capa <<= 1; + } } void @@ -690,7 +722,7 @@ port_infos_display(portid_t port_id) printf("\nConnect to socket: %u", port->socket_id); if (port_numa[port_id] != NUMA_NO_CONFIG) { - mp = mbuf_pool_find(port_numa[port_id]); + mp = mbuf_pool_find(port_numa[port_id], 0); if (mp) printf("\nmemory allocation on the socket: %d", port_numa[port_id]); @@ -699,8 +731,10 @@ port_infos_display(portid_t port_id) printf("\nLink status: %s\n", (link.link_status) ? 
("up") : ("down")); printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); - printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex")); + printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? + ("On") : ("Off")); if (!rte_eth_dev_get_mtu(port_id, &mtu)) printf("MTU: %u\n", mtu); @@ -717,22 +751,22 @@ port_infos_display(portid_t port_id) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (vlan_offload >= 0){ printf("VLAN offload: \n"); - if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) + if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) printf(" strip on, "); else printf(" strip off, "); - if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) + if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) printf("filter on, "); else printf("filter off, "); - if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) + if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) printf("extend on, "); else printf("extend off, "); - if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) + if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) printf("qinq strip on\n"); else printf("qinq strip off\n"); @@ -792,6 +826,9 @@ port_infos_display(portid_t port_id) printf("Max segment number per MTU/TSO: %hu\n", dev_info.tx_desc_lim.nb_mtu_seg_max); + printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); + print_dev_capabilities(dev_info.dev_capa); + printf(" )\n"); /* Show switch info only if valid switch domain and port id is set */ if (dev_info.switch_info.domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { @@ -802,6 +839,9 @@ port_infos_display(portid_t port_id) dev_info.switch_info.domain_id); printf("Switch Port Id: %u\n", dev_info.switch_info.port_id); + if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) + printf("Switch Rx domain: %u\n", + dev_info.switch_info.rx_domain); } } @@ -843,10 +883,8 @@ port_summary_display(portid_t port_id) if (ret != 0) return; - printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n", - port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], - mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], - mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, + printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", + port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, dev_info.driver_name, (link.link_status) ? 
("up") : ("down"), rte_eth_link_speed_to_str(link.link_speed)); } @@ -865,46 +903,54 @@ port_eeprom_display(portid_t port_id) if (len_eeprom < 0) { switch (len_eeprom) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; case -EIO: - printf("device is removed\n"); + fprintf(stderr, "device is removed\n"); break; default: - printf("Unable to get EEPROM: %d\n", len_eeprom); + fprintf(stderr, "Unable to get EEPROM: %d\n", + len_eeprom); break; } return; } - char buf[len_eeprom]; einfo.offset = 0; einfo.length = len_eeprom; - einfo.data = buf; + einfo.data = calloc(1, len_eeprom); + if (!einfo.data) { + fprintf(stderr, + "Allocation of port %u eeprom data failed\n", + port_id); + return; + } ret = rte_eth_dev_get_eeprom(port_id, &einfo); if (ret != 0) { switch (ret) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; case -EIO: - printf("device is removed\n"); + fprintf(stderr, "device is removed\n"); break; default: - printf("Unable to get EEPROM: %d\n", ret); + fprintf(stderr, "Unable to get EEPROM: %d\n", ret); break; } + free(einfo.data); return; } rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); + free(einfo.data); } void @@ -924,327 +970,56 @@ port_module_eeprom_display(portid_t port_id) if (ret != 0) { switch (ret) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; case -EIO: - printf("device is removed\n"); + fprintf(stderr, "device is removed\n"); break; default: - printf("Unable to get module EEPROM: %d\n", ret); + fprintf(stderr, "Unable to get module EEPROM: %d\n", + ret); break; } return; } - char buf[minfo.eeprom_len]; einfo.offset = 0; einfo.length = minfo.eeprom_len; - einfo.data = buf; + einfo.data = calloc(1, minfo.eeprom_len); + if (!einfo.data) { + fprintf(stderr, + "Allocation of port %u eeprom data failed\n", + port_id); + return; + } ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); if (ret != 0) { switch (ret) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; case -EIO: - printf("device is removed\n"); + fprintf(stderr, "device is removed\n"); break; default: - printf("Unable to get module EEPROM: %d\n", ret); + fprintf(stderr, "Unable to get module EEPROM: %d\n", + ret); break; } + free(einfo.data); return; } rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); -} - -void -port_offload_cap_display(portid_t port_id) -{ - struct rte_eth_dev_info dev_info; - static const char *info_border = "************"; - int ret; - - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; - - ret = eth_dev_info_get_print_err(port_id, &dev_info); - if (ret != 0) - return; - - printf("\n%s Port %d supported offload 
features: %s\n", - info_border, port_id, info_border); - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { - printf("VLAN stripped: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_STRIP) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { - printf("Double VLANs stripped: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_QINQ_STRIP) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { - printf("RX IPv4 checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { - printf("RX UDP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { - printf("RX TCP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_TCP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { - printf("RX SCTP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_SCTP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { - printf("RX Outer IPv4 checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { - printf("RX Outer UDP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { - printf("Large receive offload: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_TCP_LRO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { - printf("HW timestamp: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_TIMESTAMP) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { - printf("Rx Keep CRC: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_KEEP_CRC) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { - printf("RX offload security: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_SECURITY) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { - printf("VLAN insert: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_VLAN_INSERT) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { - printf("Double VLANs insert: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_QINQ_INSERT) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { - printf("TX IPv4 checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { - printf("TX UDP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_UDP_CKSUM) 
- printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { - printf("TX TCP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_TCP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { - printf("TX SCTP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_SCTP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { - printf("TX Outer IPv4 checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { - printf("TX TCP segmentation: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_TCP_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { - printf("TX UDP segmentation: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_UDP_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { - printf("TSO for VXLAN tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_VXLAN_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { - printf("TSO for GRE tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_GRE_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { - printf("TSO for IPIP tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_IPIP_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { - printf("TSO for GENEVE tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_GENEVE_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { - printf("IP tunnel TSO: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_IP_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { - printf("UDP tunnel TSO: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_UDP_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { - printf("TX Outer UDP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { - printf("Tx scheduling on timestamp: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) - printf("on\n"); - else - printf("off\n"); - } - + free(einfo.data); } int @@ -1260,7 +1035,7 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning) return 0; if (warning == ENABLED_WARN) - printf("Invalid port %d\n", port_id); + fprintf(stderr, "Invalid port %d\n", port_id); return 1; } @@ -1281,7 +1056,7 @@ vlan_id_is_invalid(uint16_t vlan_id) { if (vlan_id < 4096) return 0; - printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); + fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); return 1; } @@ -1293,14 +1068,14 @@ 
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) uint64_t pci_len; if (reg_off & 0x3) { - printf("Port register offset 0x%X not aligned on a 4-byte " - "boundary\n", - (unsigned)reg_off); + fprintf(stderr, + "Port register offset 0x%X not aligned on a 4-byte boundary\n", + (unsigned int)reg_off); return 1; } if (!ports[port_id].dev_info.device) { - printf("Invalid device\n"); + fprintf(stderr, "Invalid device\n"); return 0; } @@ -1308,15 +1083,16 @@ port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) if (bus && !strcmp(bus->name, "pci")) { pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); } else { - printf("Not a PCI device\n"); + fprintf(stderr, "Not a PCI device\n"); return 1; } pci_len = pci_dev->mem_resource[0].len; if (reg_off >= pci_len) { - printf("Port %d: register offset %u (0x%X) out of port PCI " - "resource (length=%"PRIu64")\n", - port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); + fprintf(stderr, + "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", + port_id, (unsigned int)reg_off, (unsigned int)reg_off, + pci_len); return 1; } return 0; @@ -1327,7 +1103,7 @@ reg_bit_pos_is_invalid(uint8_t bit_pos) { if (bit_pos <= 31) return 0; - printf("Invalid bit position %d (must be <= 31)\n", bit_pos); + fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); return 1; } @@ -1414,7 +1190,8 @@ port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, if (reg_bit_pos_is_invalid(bit_pos)) return; if (bit_v > 1) { - printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); + fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", + (int) bit_v); return; } reg_v = port_id_pci_reg_read(port_id, reg_off); @@ -1454,7 +1231,7 @@ port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, max_v = 0xFFFFFFFF; if (value > max_v) { - printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", + fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", (unsigned)value, (unsigned)value, (unsigned)max_v, (unsigned)max_v); return; @@ -1477,89 +1254,247 @@ port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) display_port_reg_value(port_id, reg_off, reg_v); } +static uint32_t +eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) +{ + uint32_t overhead_len; + + if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) + overhead_len = max_rx_pktlen - max_mtu; + else + overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + return overhead_len; +} + +static int +eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu) +{ + struct rte_eth_dev_info dev_info; + uint32_t overhead_len; + uint32_t frame_size; + int ret; + + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + + if (mtu < dev_info.min_mtu) { + fprintf(stderr, + "MTU (%u) < device min MTU (%u) for port_id %u\n", + mtu, dev_info.min_mtu, port_id); + return -EINVAL; + } + if (mtu > dev_info.max_mtu) { + fprintf(stderr, + "MTU (%u) > device max MTU (%u) for port_id %u\n", + mtu, dev_info.max_mtu, port_id); + return -EINVAL; + } + + overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, + dev_info.max_mtu); + frame_size = mtu + overhead_len; + if (frame_size > dev_info.max_rx_pktlen) { + fprintf(stderr, + "Frame size (%u) > device max frame size (%u) for port_id %u\n", + frame_size, dev_info.max_rx_pktlen, port_id); + return -EINVAL; + } + + return 0; +} + void port_mtu_set(portid_t port_id, uint16_t mtu) { + struct rte_port *port = &ports[port_id]; int diag; - struct rte_port *rte_port = 
&ports[port_id]; - struct rte_eth_dev_info dev_info; - uint16_t eth_overhead; - int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; - ret = eth_dev_info_get_print_err(port_id, &dev_info); - if (ret != 0) + diag = eth_dev_validate_mtu(port_id, mtu); + if (diag != 0) return; - if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { - printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", - mtu, dev_info.min_mtu, dev_info.max_mtu); - return; - } - diag = rte_eth_dev_set_mtu(port_id, mtu); - if (diag) - printf("Set MTU failed. diag=%d\n", diag); - else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { - /* - * Ether overhead in driver is equal to the difference of - * max_rx_pktlen and max_mtu in rte_eth_dev_info when the - * device supports jumbo frame. - */ - eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; - if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { - rte_port->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_JUMBO_FRAME; - rte_port->dev_conf.rxmode.max_rx_pkt_len = - mtu + eth_overhead; - } else - rte_port->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_JUMBO_FRAME; + if (port->need_reconfig == 0) { + diag = rte_eth_dev_set_mtu(port_id, mtu); + if (diag != 0) { + fprintf(stderr, "Set MTU failed. diag=%d\n", diag); + return; + } } + + port->dev_conf.rxmode.mtu = mtu; } /* Generic flow management functions. */ -/** Generate a port_flow entry from attributes/pattern/actions. */ -static struct port_flow * -port_flow_new(const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action *actions, - struct rte_flow_error *error) +static struct port_flow_tunnel * +port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) { - const struct rte_flow_conv_rule rule = { - .attr_ro = attr, - .pattern_ro = pattern, - .actions_ro = actions, - }; - struct port_flow *pf; - int ret; + struct port_flow_tunnel *flow_tunnel; - ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); - if (ret < 0) - return NULL; - pf = calloc(1, offsetof(struct port_flow, rule) + ret); - if (!pf) { - rte_flow_error_set - (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "calloc() failed"); - return NULL; + LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { + if (flow_tunnel->id == port_tunnel_id) + goto out; } - if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, - error) >= 0) - return pf; - free(pf); - return NULL; + flow_tunnel = NULL; + +out: + return flow_tunnel; } -/** Print a message out of a flow error. 
*/ -static int -port_flow_complain(struct rte_flow_error *error) +const char * +port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) { - static const char *const errstrlist[] = { - [RTE_FLOW_ERROR_TYPE_NONE] = "no error", - [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", + const char *type; + switch (tunnel->type) { + default: + type = "unknown"; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + type = "vxlan"; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + type = "gre"; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + type = "nvgre"; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + type = "geneve"; + break; + } + + return type; +} + +struct port_flow_tunnel * +port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) +{ + struct rte_port *port = &ports[port_id]; + struct port_flow_tunnel *flow_tunnel; + + LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { + if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) + goto out; + } + flow_tunnel = NULL; + +out: + return flow_tunnel; +} + +void port_flow_tunnel_list(portid_t port_id) +{ + struct rte_port *port = &ports[port_id]; + struct port_flow_tunnel *flt; + + LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { + printf("port %u tunnel #%u type=%s", + port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); + if (flt->tunnel.tun_id) + printf(" id=%" PRIu64, flt->tunnel.tun_id); + printf("\n"); + } +} + +void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) +{ + struct rte_port *port = &ports[port_id]; + struct port_flow_tunnel *flt; + + LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { + if (flt->id == tunnel_id) + break; + } + if (flt) { + LIST_REMOVE(flt, chain); + free(flt); + printf("port %u: flow tunnel #%u destroyed\n", + port_id, tunnel_id); + } +} + +void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) +{ + struct rte_port *port = &ports[port_id]; + enum rte_flow_item_type type; + struct port_flow_tunnel *flt; + + if (!strcmp(ops->type, "vxlan")) + type = RTE_FLOW_ITEM_TYPE_VXLAN; + else if (!strcmp(ops->type, "gre")) + type = RTE_FLOW_ITEM_TYPE_GRE; + else if (!strcmp(ops->type, "nvgre")) + type = RTE_FLOW_ITEM_TYPE_NVGRE; + else if (!strcmp(ops->type, "geneve")) + type = RTE_FLOW_ITEM_TYPE_GENEVE; + else { + fprintf(stderr, "cannot offload \"%s\" tunnel type\n", + ops->type); + return; + } + LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { + if (flt->tunnel.type == type) + break; + } + if (!flt) { + flt = calloc(1, sizeof(*flt)); + if (!flt) { + fprintf(stderr, "failed to allocate port flt object\n"); + return; + } + flt->tunnel.type = type; + flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : + LIST_FIRST(&port->flow_tunnel_list)->id + 1; + LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); + } + printf("port %d: flow tunnel #%u type %s\n", + port_id, flt->id, ops->type); +} + +/** Generate a port_flow entry from attributes/pattern/actions. 
*/ +static struct port_flow * +port_flow_new(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + const struct rte_flow_conv_rule rule = { + .attr_ro = attr, + .pattern_ro = pattern, + .actions_ro = actions, + }; + struct port_flow *pf; + int ret; + + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); + if (ret < 0) + return NULL; + pf = calloc(1, offsetof(struct port_flow, rule) + ret); + if (!pf) { + rte_flow_error_set + (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "calloc() failed"); + return NULL; + } + if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, + error) >= 0) + return pf; + free(pf); + return NULL; +} + +/** Print a message out of a flow error. */ +static int +port_flow_complain(struct rte_flow_error *error) +{ + static const char *const errstrlist[] = { + [RTE_FLOW_ERROR_TYPE_NONE] = "no error", + [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", @@ -1585,101 +1520,1379 @@ port_flow_complain(struct rte_flow_error *error) errstr = "unknown type"; else errstr = errstrlist[error->type]; - printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, - error->type, errstr, - error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", - error->cause), buf) : "", - error->message ? error->message : "(no stated reason)", - rte_strerror(err)); + fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", + __func__, error->type, errstr, + error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", + error->cause), buf) : "", + error->message ? error->message : "(no stated reason)", + rte_strerror(err)); + + switch (error->type) { + case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER: + fprintf(stderr, "The status suggests the use of \"transfer\" " + "as the possible cause of the failure. Make " + "sure that the flow in question and its " + "indirect components (if any) are managed " + "via \"transfer\" proxy port. 
Use command " + "\"show port (port_id) flow transfer proxy\" " + "to figure out the proxy port ID\n"); + break; + default: + break; + } + return -err; } -static void -rss_config_display(struct rte_flow_action_rss *rss_conf) +static void +rss_config_display(struct rte_flow_action_rss *rss_conf) +{ + uint8_t i; + + if (rss_conf == NULL) { + fprintf(stderr, "Invalid rule\n"); + return; + } + + printf("RSS:\n" + " queues:"); + if (rss_conf->queue_num == 0) + printf(" none"); + for (i = 0; i < rss_conf->queue_num; i++) + printf(" %d", rss_conf->queue[i]); + printf("\n"); + + printf(" function: "); + switch (rss_conf->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + printf("default\n"); + break; + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + printf("toeplitz\n"); + break; + case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: + printf("simple_xor\n"); + break; + case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: + printf("symmetric_toeplitz\n"); + break; + default: + printf("Unknown function\n"); + return; + } + + printf(" types:\n"); + if (rss_conf->types == 0) { + printf(" none\n"); + return; + } + for (i = 0; rss_type_table[i].str; i++) { + if ((rss_conf->types & + rss_type_table[i].rss_type) == + rss_type_table[i].rss_type && + rss_type_table[i].rss_type != 0) + printf(" %s\n", rss_type_table[i].str); + } +} + +static struct port_indirect_action * +action_get_by_id(portid_t port_id, uint32_t id) +{ + struct rte_port *port; + struct port_indirect_action **ppia; + struct port_indirect_action *pia = NULL; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return NULL; + port = &ports[port_id]; + ppia = &port->actions_list; + while (*ppia) { + if ((*ppia)->id == id) { + pia = *ppia; + break; + } + ppia = &(*ppia)->next; + } + if (!pia) + fprintf(stderr, + "Failed to find indirect action #%u on port %u\n", + id, port_id); + return pia; +} + +static int +action_alloc(portid_t port_id, uint32_t id, + struct port_indirect_action **action) +{ + struct rte_port *port; + struct port_indirect_action **ppia; + struct port_indirect_action *pia = NULL; + + *action = NULL; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + if (id == UINT32_MAX) { + /* taking first available ID */ + if (port->actions_list) { + if (port->actions_list->id == UINT32_MAX - 1) { + fprintf(stderr, + "Highest indirect action ID is already assigned, delete it first\n"); + return -ENOMEM; + } + id = port->actions_list->id + 1; + } else { + id = 0; + } + } + pia = calloc(1, sizeof(*pia)); + if (!pia) { + fprintf(stderr, + "Allocation of port %u indirect action failed\n", + port_id); + return -ENOMEM; + } + ppia = &port->actions_list; + while (*ppia && (*ppia)->id > id) + ppia = &(*ppia)->next; + if (*ppia && (*ppia)->id == id) { + fprintf(stderr, + "Indirect action #%u is already assigned, delete it first\n", + id); + free(pia); + return -EINVAL; + } + pia->next = *ppia; + pia->id = id; + *ppia = pia; + *action = pia; + return 0; +} + +static int +template_alloc(uint32_t id, struct port_template **template, + struct port_template **list) +{ + struct port_template *lst = *list; + struct port_template **ppt; + struct port_template *pt = NULL; + + *template = NULL; + if (id == UINT32_MAX) { + /* taking first available ID */ + if (lst) { + if (lst->id == UINT32_MAX - 1) { + printf("Highest template ID is already" + " assigned, delete it first\n"); + return -ENOMEM; + } + id = lst->id + 1; + } else { + id = 0; + } + } + pt = calloc(1, 
sizeof(*pt)); + if (!pt) { + printf("Allocation of port template failed\n"); + return -ENOMEM; + } + ppt = list; + while (*ppt && (*ppt)->id > id) + ppt = &(*ppt)->next; + if (*ppt && (*ppt)->id == id) { + printf("Template #%u is already assigned," + " delete it first\n", id); + free(pt); + return -EINVAL; + } + pt->next = *ppt; + pt->id = id; + *ppt = pt; + *template = pt; + return 0; +} + +static int +table_alloc(uint32_t id, struct port_table **table, + struct port_table **list) +{ + struct port_table *lst = *list; + struct port_table **ppt; + struct port_table *pt = NULL; + + *table = NULL; + if (id == UINT32_MAX) { + /* taking first available ID */ + if (lst) { + if (lst->id == UINT32_MAX - 1) { + printf("Highest table ID is already" + " assigned, delete it first\n"); + return -ENOMEM; + } + id = lst->id + 1; + } else { + id = 0; + } + } + pt = calloc(1, sizeof(*pt)); + if (!pt) { + printf("Allocation of table failed\n"); + return -ENOMEM; + } + ppt = list; + while (*ppt && (*ppt)->id > id) + ppt = &(*ppt)->next; + if (*ppt && (*ppt)->id == id) { + printf("Table #%u is already assigned," + " delete it first\n", id); + free(pt); + return -EINVAL; + } + pt->next = *ppt; + pt->id = id; + *ppt = pt; + *table = pt; + return 0; +} + +/** Get info about flow management resources. */ +int +port_flow_get_info(portid_t port_id) +{ + struct rte_flow_port_info port_info; + struct rte_flow_queue_info queue_info; + struct rte_flow_error error; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x99, sizeof(error)); + memset(&port_info, 0, sizeof(port_info)); + memset(&queue_info, 0, sizeof(queue_info)); + if (rte_flow_info_get(port_id, &port_info, &queue_info, &error)) + return port_flow_complain(&error); + printf("Flow engine resources on port %u:\n" + "Number of queues: %d\n" + "Size of queues: %d\n" + "Number of counters: %d\n" + "Number of aging objects: %d\n" + "Number of meter actions: %d\n", + port_id, port_info.max_nb_queues, + queue_info.max_size, + port_info.max_nb_counters, + port_info.max_nb_aging_objects, + port_info.max_nb_meters); + return 0; +} + +/** Configure flow management resources. */ +int +port_flow_configure(portid_t port_id, + const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr) +{ + struct rte_port *port; + struct rte_flow_error error; + const struct rte_flow_queue_attr *attr_list[nb_queue]; + int std_queue; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + port->queue_nb = nb_queue; + port->queue_sz = queue_attr->size; + for (std_queue = 0; std_queue < nb_queue; std_queue++) + attr_list[std_queue] = queue_attr; + /* Poisoning to make sure PMDs update it in case of error. 
*/ + memset(&error, 0x66, sizeof(error)); + if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error)) + return port_flow_complain(&error); + printf("Configure flows on port %u: " + "number of queues %d with %d elements\n", + port_id, nb_queue, queue_attr->size); + return 0; +} + +/** Create indirect action */ +int +port_action_handle_create(portid_t port_id, uint32_t id, + const struct rte_flow_indir_action_conf *conf, + const struct rte_flow_action *action) +{ + struct port_indirect_action *pia; + int ret; + struct rte_flow_error error; + + ret = action_alloc(port_id, id, &pia); + if (ret) + return ret; + if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { + struct rte_flow_action_age *age = + (struct rte_flow_action_age *)(uintptr_t)(action->conf); + + pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; + age->context = &pia->age_type; + } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { + struct rte_flow_action_conntrack *ct = + (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); + + memcpy(ct, &conntrack_context, sizeof(*ct)); + } + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x22, sizeof(error)); + pia->handle = rte_flow_action_handle_create(port_id, conf, action, + &error); + if (!pia->handle) { + uint32_t destroy_id = pia->id; + port_action_handle_destroy(port_id, 1, &destroy_id); + return port_flow_complain(&error); + } + pia->type = action->type; + printf("Indirect action #%u created\n", pia->id); + return 0; +} + +/** Destroy indirect action */ +int +port_action_handle_destroy(portid_t port_id, + uint32_t n, + const uint32_t *actions) +{ + struct rte_port *port; + struct port_indirect_action **tmp; + uint32_t c = 0; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + tmp = &port->actions_list; + while (*tmp) { + uint32_t i; + + for (i = 0; i != n; ++i) { + struct rte_flow_error error; + struct port_indirect_action *pia = *tmp; + + if (actions[i] != pia->id) + continue; + /* + * Poisoning to make sure PMDs update it in case + * of error. + */ + memset(&error, 0x33, sizeof(error)); + + if (pia->handle && rte_flow_action_handle_destroy( + port_id, pia->handle, &error)) { + ret = port_flow_complain(&error); + continue; + } + *tmp = pia->next; + printf("Indirect action #%u destroyed\n", pia->id); + free(pia); + break; + } + if (i == n) + tmp = &(*tmp)->next; + ++c; + } + return ret; +} + +int +port_action_handle_flush(portid_t port_id) +{ + struct rte_port *port; + struct port_indirect_action **tmp; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + tmp = &port->actions_list; + while (*tmp != NULL) { + struct rte_flow_error error; + struct port_indirect_action *pia = *tmp; + + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x44, sizeof(error)); + if (pia->handle != NULL && + rte_flow_action_handle_destroy + (port_id, pia->handle, &error) != 0) { + printf("Indirect action #%u not destroyed\n", pia->id); + ret = port_flow_complain(&error); + tmp = &pia->next; + } else { + *tmp = pia->next; + free(pia); + } + } + return ret; +} + +/** Get indirect action by port + id */ +struct rte_flow_action_handle * +port_action_handle_get_by_id(portid_t port_id, uint32_t id) +{ + + struct port_indirect_action *pia = action_get_by_id(port_id, id); + + return (pia) ? 
pia->handle : NULL; +} + +/** Update indirect action */ +int +port_action_handle_update(portid_t port_id, uint32_t id, + const struct rte_flow_action *action) +{ + struct rte_flow_error error; + struct rte_flow_action_handle *action_handle; + struct port_indirect_action *pia; + const void *update; + + action_handle = port_action_handle_get_by_id(port_id, id); + if (!action_handle) + return -EINVAL; + pia = action_get_by_id(port_id, id); + if (!pia) + return -EINVAL; + switch (pia->type) { + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + update = action->conf; + break; + default: + update = action; + break; + } + if (rte_flow_action_handle_update(port_id, action_handle, update, + &error)) { + return port_flow_complain(&error); + } + printf("Indirect action #%u updated\n", id); + return 0; +} + +int +port_action_handle_query(portid_t port_id, uint32_t id) +{ + struct rte_flow_error error; + struct port_indirect_action *pia; + union { + struct rte_flow_query_count count; + struct rte_flow_query_age age; + struct rte_flow_action_conntrack ct; + } query; + + pia = action_get_by_id(port_id, id); + if (!pia) + return -EINVAL; + switch (pia->type) { + case RTE_FLOW_ACTION_TYPE_AGE: + case RTE_FLOW_ACTION_TYPE_COUNT: + break; + default: + fprintf(stderr, + "Indirect action %u (type: %d) on port %u doesn't support query\n", + id, pia->type, port_id); + return -ENOTSUP; + } + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x55, sizeof(error)); + memset(&query, 0, sizeof(query)); + if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error)) + return port_flow_complain(&error); + switch (pia->type) { + case RTE_FLOW_ACTION_TYPE_AGE: + printf("Indirect AGE action:\n" + " aged: %u\n" + " sec_since_last_hit_valid: %u\n" + " sec_since_last_hit: %" PRIu32 "\n", + query.age.aged, + query.age.sec_since_last_hit_valid, + query.age.sec_since_last_hit); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + printf("Indirect COUNT action:\n" + " hits_set: %u\n" + " bytes_set: %u\n" + " hits: %" PRIu64 "\n" + " bytes: %" PRIu64 "\n", + query.count.hits_set, + query.count.bytes_set, + query.count.hits, + query.count.bytes); + break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + printf("Conntrack Context:\n" + " Peer: %u, Flow dir: %s, Enable: %u\n" + " Live: %u, SACK: %u, CACK: %u\n" + " Packet dir: %s, Liberal: %u, State: %u\n" + " Factor: %u, Retrans: %u, TCP flags: %u\n" + " Last Seq: %u, Last ACK: %u\n" + " Last Win: %u, Last End: %u\n", + query.ct.peer_port, + query.ct.is_original_dir ? "Original" : "Reply", + query.ct.enable, query.ct.live_connection, + query.ct.selective_ack, query.ct.challenge_ack_passed, + query.ct.last_direction ? 
"Original" : "Reply", + query.ct.liberal_mode, query.ct.state, + query.ct.max_ack_window, query.ct.retransmission_limit, + query.ct.last_index, query.ct.last_seq, + query.ct.last_ack, query.ct.last_window, + query.ct.last_end); + printf(" Original Dir:\n" + " scale: %u, fin: %u, ack seen: %u\n" + " unacked data: %u\n Sent end: %u," + " Reply end: %u, Max win: %u, Max ACK: %u\n", + query.ct.original_dir.scale, + query.ct.original_dir.close_initiated, + query.ct.original_dir.last_ack_seen, + query.ct.original_dir.data_unacked, + query.ct.original_dir.sent_end, + query.ct.original_dir.reply_end, + query.ct.original_dir.max_win, + query.ct.original_dir.max_ack); + printf(" Reply Dir:\n" + " scale: %u, fin: %u, ack seen: %u\n" + " unacked data: %u\n Sent end: %u," + " Reply end: %u, Max win: %u, Max ACK: %u\n", + query.ct.reply_dir.scale, + query.ct.reply_dir.close_initiated, + query.ct.reply_dir.last_ack_seen, + query.ct.reply_dir.data_unacked, + query.ct.reply_dir.sent_end, + query.ct.reply_dir.reply_end, + query.ct.reply_dir.max_win, + query.ct.reply_dir.max_ack); + break; + default: + fprintf(stderr, + "Indirect action %u (type: %d) on port %u doesn't support query\n", + id, pia->type, port_id); + break; + } + return 0; +} + +static struct port_flow_tunnel * +port_flow_tunnel_offload_cmd_prep(portid_t port_id, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + const struct tunnel_ops *tunnel_ops) +{ + int ret; + struct rte_port *port; + struct port_flow_tunnel *pft; + struct rte_flow_error error; + + port = &ports[port_id]; + pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); + if (!pft) { + fprintf(stderr, "failed to locate port flow tunnel #%u\n", + tunnel_ops->id); + return NULL; + } + if (tunnel_ops->actions) { + uint32_t num_actions; + const struct rte_flow_action *aptr; + + ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, + &pft->pmd_actions, + &pft->num_pmd_actions, + &error); + if (ret) { + port_flow_complain(&error); + return NULL; + } + for (aptr = actions, num_actions = 1; + aptr->type != RTE_FLOW_ACTION_TYPE_END; + aptr++, num_actions++); + pft->actions = malloc( + (num_actions + pft->num_pmd_actions) * + sizeof(actions[0])); + if (!pft->actions) { + rte_flow_tunnel_action_decap_release( + port_id, pft->actions, + pft->num_pmd_actions, &error); + return NULL; + } + rte_memcpy(pft->actions, pft->pmd_actions, + pft->num_pmd_actions * sizeof(actions[0])); + rte_memcpy(pft->actions + pft->num_pmd_actions, actions, + num_actions * sizeof(actions[0])); + } + if (tunnel_ops->items) { + uint32_t num_items; + const struct rte_flow_item *iptr; + + ret = rte_flow_tunnel_match(port_id, &pft->tunnel, + &pft->pmd_items, + &pft->num_pmd_items, + &error); + if (ret) { + port_flow_complain(&error); + return NULL; + } + for (iptr = pattern, num_items = 1; + iptr->type != RTE_FLOW_ITEM_TYPE_END; + iptr++, num_items++); + pft->items = malloc((num_items + pft->num_pmd_items) * + sizeof(pattern[0])); + if (!pft->items) { + rte_flow_tunnel_item_release( + port_id, pft->pmd_items, + pft->num_pmd_items, &error); + return NULL; + } + rte_memcpy(pft->items, pft->pmd_items, + pft->num_pmd_items * sizeof(pattern[0])); + rte_memcpy(pft->items + pft->num_pmd_items, pattern, + num_items * sizeof(pattern[0])); + } + + return pft; +} + +static void +port_flow_tunnel_offload_cmd_release(portid_t port_id, + const struct tunnel_ops *tunnel_ops, + struct port_flow_tunnel *pft) +{ + struct rte_flow_error error; + + if (tunnel_ops->actions) { + free(pft->actions); + 
rte_flow_tunnel_action_decap_release(
+			port_id, pft->pmd_actions,
+			pft->num_pmd_actions, &error);
+		pft->actions = NULL;
+		pft->pmd_actions = NULL;
+	}
+	if (tunnel_ops->items) {
+		free(pft->items);
+		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
+					     pft->num_pmd_items,
+					     &error);
+		pft->items = NULL;
+		pft->pmd_items = NULL;
+	}
+}
+
+/** Add port meter policy */
+int
+port_meter_policy_add(portid_t port_id, uint32_t policy_id,
+			const struct rte_flow_action *actions)
+{
+	struct rte_mtr_error error;
+	const struct rte_flow_action *act = actions;
+	const struct rte_flow_action *start;
+	struct rte_mtr_meter_policy_params policy;
+	uint32_t i = 0, act_n;
+	int ret;
+
+	for (i = 0; i < RTE_COLORS; i++) {
+		for (act_n = 0, start = act;
+			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
+			act_n++;
+		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
+			policy.actions[i] = start;
+		else
+			policy.actions[i] = NULL;
+		act++;
+	}
+	ret = rte_mtr_meter_policy_add(port_id,
+			policy_id,
+			&policy, &error);
+	if (ret)
+		print_mtr_err_msg(&error);
+	return ret;
+}
+
+/** Validate flow rule. */
+int
+port_flow_validate(portid_t port_id,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item *pattern,
+		   const struct rte_flow_action *actions,
+		   const struct tunnel_ops *tunnel_ops)
+{
+	struct rte_flow_error error;
+	struct port_flow_tunnel *pft = NULL;
+	int ret;
+
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x11, sizeof(error));
+	if (tunnel_ops->enabled) {
+		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
+							actions, tunnel_ops);
+		if (!pft)
+			return -ENOENT;
+		if (pft->items)
+			pattern = pft->items;
+		if (pft->actions)
+			actions = pft->actions;
+	}
+	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
+	if (tunnel_ops->enabled)
+		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
+	if (ret)
+		return port_flow_complain(&error);
+	printf("Flow rule validated\n");
+	return 0;
+}
+
+/** Return age action structure if exists, otherwise NULL. */
+static struct rte_flow_action_age *
+age_action_get(const struct rte_flow_action *actions)
+{
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_AGE:
+			return (struct rte_flow_action_age *)
+				(uintptr_t)actions->conf;
+		default:
+			break;
+		}
+	}
+	return NULL;
+}
+
+/** Create pattern template */
+int
+port_flow_pattern_template_create(portid_t port_id, uint32_t id,
+		const struct rte_flow_pattern_template_attr *attr,
+		const struct rte_flow_item *pattern)
+{
+	struct rte_port *port;
+	struct port_template *pit;
+	int ret;
+	struct rte_flow_error error;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	ret = template_alloc(id, &pit, &port->pattern_templ_list);
+	if (ret)
+		return ret;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x22, sizeof(error));
+	pit->template.pattern_template = rte_flow_pattern_template_create(port_id,
+		attr, pattern, &error);
+	if (!pit->template.pattern_template) {
+		uint32_t destroy_id = pit->id;
+		port_flow_pattern_template_destroy(port_id, 1, &destroy_id);
+		return port_flow_complain(&error);
+	}
+	printf("Pattern template #%u created\n", pit->id);
+	return 0;
+}
+
+/** Destroy pattern template */
+int
+port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
+				   const uint32_t *template)
+{
+	struct rte_port *port;
+	struct port_template **tmp;
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	tmp = &port->pattern_templ_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_template *pit = *tmp;
+
+			if (template[i] != pit->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMDs update it in case
+			 * of error.
+			 */
+			memset(&error, 0x33, sizeof(error));
+
+			if (pit->template.pattern_template &&
+			    rte_flow_pattern_template_destroy(port_id,
+					pit->template.pattern_template,
+					&error)) {
+				ret = port_flow_complain(&error);
+				continue;
+			}
+			*tmp = pit->next;
+			printf("Pattern template #%u destroyed\n", pit->id);
+			free(pit);
+			break;
+		}
+		if (i == n)
+			tmp = &(*tmp)->next;
+		++c;
+	}
+	return ret;
+}
+
+/** Create actions template */
+int
+port_flow_actions_template_create(portid_t port_id, uint32_t id,
+		const struct rte_flow_actions_template_attr *attr,
+		const struct rte_flow_action *actions,
+		const struct rte_flow_action *masks)
+{
+	struct rte_port *port;
+	struct port_template *pat;
+	int ret;
+	struct rte_flow_error error;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	ret = template_alloc(id, &pat, &port->actions_templ_list);
+	if (ret)
+		return ret;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x22, sizeof(error));
+	pat->template.actions_template = rte_flow_actions_template_create(port_id,
+		attr, actions, masks, &error);
+	if (!pat->template.actions_template) {
+		uint32_t destroy_id = pat->id;
+		port_flow_actions_template_destroy(port_id, 1, &destroy_id);
+		return port_flow_complain(&error);
+	}
+	printf("Actions template #%u created\n", pat->id);
+	return 0;
+}
+
+/** Destroy actions template */
+int
+port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
+				   const uint32_t *template)
+{
+	struct rte_port *port;
+	struct port_template **tmp;
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	tmp = &port->actions_templ_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_template *pat = *tmp;
+
+			if (template[i] != pat->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMDs update it in case
+			 * of error.
+ */ + memset(&error, 0x33, sizeof(error)); + + if (pat->template.actions_template && + rte_flow_actions_template_destroy(port_id, + pat->template.actions_template, &error)) { + ret = port_flow_complain(&error); + continue; + } + *tmp = pat->next; + printf("Actions template #%u destroyed\n", pat->id); + free(pat); + break; + } + if (i == n) + tmp = &(*tmp)->next; + ++c; + } + return ret; +} + +/** Create table */ +int +port_flow_template_table_create(portid_t port_id, uint32_t id, + const struct rte_flow_template_table_attr *table_attr, + uint32_t nb_pattern_templates, uint32_t *pattern_templates, + uint32_t nb_actions_templates, uint32_t *actions_templates) +{ + struct rte_port *port; + struct port_table *pt; + struct port_template *temp = NULL; + int ret; + uint32_t i; + struct rte_flow_error error; + struct rte_flow_pattern_template + *flow_pattern_templates[nb_pattern_templates]; + struct rte_flow_actions_template + *flow_actions_templates[nb_actions_templates]; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + for (i = 0; i < nb_pattern_templates; ++i) { + bool found = false; + temp = port->pattern_templ_list; + while (temp) { + if (pattern_templates[i] == temp->id) { + flow_pattern_templates[i] = + temp->template.pattern_template; + found = true; + break; + } + temp = temp->next; + } + if (!found) { + printf("Pattern template #%u is invalid\n", + pattern_templates[i]); + return -EINVAL; + } + } + for (i = 0; i < nb_actions_templates; ++i) { + bool found = false; + temp = port->actions_templ_list; + while (temp) { + if (actions_templates[i] == temp->id) { + flow_actions_templates[i] = + temp->template.actions_template; + found = true; + break; + } + temp = temp->next; + } + if (!found) { + printf("Actions template #%u is invalid\n", + actions_templates[i]); + return -EINVAL; + } + } + ret = table_alloc(id, &pt, &port->table_list); + if (ret) + return ret; + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x22, sizeof(error)); + pt->table = rte_flow_template_table_create(port_id, table_attr, + flow_pattern_templates, nb_pattern_templates, + flow_actions_templates, nb_actions_templates, + &error); + + if (!pt->table) { + uint32_t destroy_id = pt->id; + port_flow_template_table_destroy(port_id, 1, &destroy_id); + return port_flow_complain(&error); + } + pt->nb_pattern_templates = nb_pattern_templates; + pt->nb_actions_templates = nb_actions_templates; + printf("Template table #%u created\n", pt->id); + return 0; +} + +/** Destroy table */ +int +port_flow_template_table_destroy(portid_t port_id, + uint32_t n, const uint32_t *table) +{ + struct rte_port *port; + struct port_table **tmp; + uint32_t c = 0; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + tmp = &port->table_list; + while (*tmp) { + uint32_t i; + + for (i = 0; i != n; ++i) { + struct rte_flow_error error; + struct port_table *pt = *tmp; + + if (table[i] != pt->id) + continue; + /* + * Poisoning to make sure PMDs update it in case + * of error. 
+ */ + memset(&error, 0x33, sizeof(error)); + + if (pt->table && + rte_flow_template_table_destroy(port_id, + pt->table, + &error)) { + ret = port_flow_complain(&error); + continue; + } + *tmp = pt->next; + printf("Template table #%u destroyed\n", pt->id); + free(pt); + break; + } + if (i == n) + tmp = &(*tmp)->next; + ++c; + } + return ret; +} + +/** Enqueue create flow rule operation. */ +int +port_queue_flow_create(portid_t port_id, queueid_t queue_id, + bool postpone, uint32_t table_id, + uint32_t pattern_idx, uint32_t actions_idx, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions) +{ + struct rte_flow_op_attr op_attr = { .postpone = postpone }; + struct rte_flow *flow; + struct rte_port *port; + struct port_flow *pf; + struct port_table *pt; + uint32_t id = 0; + bool found; + struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL }; + struct rte_flow_action_age *age = age_action_get(actions); + + port = &ports[port_id]; + if (port->flow_list) { + if (port->flow_list->id == UINT32_MAX) { + printf("Highest rule ID is already assigned," + " delete it first"); + return -ENOMEM; + } + id = port->flow_list->id + 1; + } + + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; + } + + found = false; + pt = port->table_list; + while (pt) { + if (table_id == pt->id) { + found = true; + break; + } + pt = pt->next; + } + if (!found) { + printf("Table #%u is invalid\n", table_id); + return -EINVAL; + } + + if (pattern_idx >= pt->nb_pattern_templates) { + printf("Pattern template index #%u is invalid," + " %u templates present in the table\n", + pattern_idx, pt->nb_pattern_templates); + return -EINVAL; + } + if (actions_idx >= pt->nb_actions_templates) { + printf("Actions template index #%u is invalid," + " %u templates present in the table\n", + actions_idx, pt->nb_actions_templates); + return -EINVAL; + } + + pf = port_flow_new(NULL, pattern, actions, &error); + if (!pf) + return port_flow_complain(&error); + if (age) { + pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; + age->context = &pf->age_type; + } + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x11, sizeof(error)); + flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, + pattern, pattern_idx, actions, actions_idx, NULL, &error); + if (!flow) { + uint32_t flow_id = pf->id; + port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); + return port_flow_complain(&error); + } + + pf->next = port->flow_list; + pf->id = id; + pf->flow = flow; + port->flow_list = pf; + printf("Flow rule #%u creation enqueued\n", pf->id); + return 0; +} + +/** Enqueue number of destroy flow rules operations. 
*/ +int +port_queue_flow_destroy(portid_t port_id, queueid_t queue_id, + bool postpone, uint32_t n, const uint32_t *rule) { - uint8_t i; + struct rte_flow_op_attr op_attr = { .postpone = postpone }; + struct rte_port *port; + struct port_flow **tmp; + uint32_t c = 0; + int ret = 0; - if (rss_conf == NULL) { - printf("Invalid rule\n"); - return; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; } - printf("RSS:\n" - " queues:"); - if (rss_conf->queue_num == 0) - printf(" none"); - for (i = 0; i < rss_conf->queue_num; i++) - printf(" %d", rss_conf->queue[i]); - printf("\n"); + tmp = &port->flow_list; + while (*tmp) { + uint32_t i; - printf(" function: "); - switch (rss_conf->func) { - case RTE_ETH_HASH_FUNCTION_DEFAULT: - printf("default\n"); - break; - case RTE_ETH_HASH_FUNCTION_TOEPLITZ: - printf("toeplitz\n"); - break; - case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: - printf("simple_xor\n"); - break; - case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: - printf("symmetric_toeplitz\n"); - break; - default: - printf("Unknown function\n"); - return; - } + for (i = 0; i != n; ++i) { + struct rte_flow_error error; + struct port_flow *pf = *tmp; - printf(" types:\n"); - if (rss_conf->types == 0) { - printf(" none\n"); - return; - } - for (i = 0; rss_type_table[i].str; i++) { - if ((rss_conf->types & - rss_type_table[i].rss_type) == - rss_type_table[i].rss_type && - rss_type_table[i].rss_type != 0) - printf(" %s\n", rss_type_table[i].str); + if (rule[i] != pf->id) + continue; + /* + * Poisoning to make sure PMD + * update it in case of error. + */ + memset(&error, 0x33, sizeof(error)); + if (rte_flow_async_destroy(port_id, queue_id, &op_attr, + pf->flow, NULL, &error)) { + ret = port_flow_complain(&error); + continue; + } + printf("Flow rule #%u destruction enqueued\n", pf->id); + *tmp = pf->next; + free(pf); + break; + } + if (i == n) + tmp = &(*tmp)->next; + ++c; } + return ret; } -/** Validate flow rule. */ +/** Enqueue indirect action create operation. */ int -port_flow_validate(portid_t port_id, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action *actions) +port_queue_action_handle_create(portid_t port_id, uint32_t queue_id, + bool postpone, uint32_t id, + const struct rte_flow_indir_action_conf *conf, + const struct rte_flow_action *action) { + const struct rte_flow_op_attr attr = { .postpone = postpone}; + struct rte_port *port; + struct port_indirect_action *pia; + int ret; struct rte_flow_error error; + ret = action_alloc(port_id, id, &pia); + if (ret) + return ret; + + port = &ports[port_id]; + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; + } + + if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { + struct rte_flow_action_age *age = + (struct rte_flow_action_age *)(uintptr_t)(action->conf); + + pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; + age->context = &pia->age_type; + } /* Poisoning to make sure PMDs update it in case of error. 
*/ - memset(&error, 0x11, sizeof(error)); - if (rte_flow_validate(port_id, attr, pattern, actions, &error)) + memset(&error, 0x88, sizeof(error)); + pia->handle = rte_flow_async_action_handle_create(port_id, queue_id, + &attr, conf, action, NULL, &error); + if (!pia->handle) { + uint32_t destroy_id = pia->id; + port_queue_action_handle_destroy(port_id, queue_id, + postpone, 1, &destroy_id); return port_flow_complain(&error); - printf("Flow rule validated\n"); + } + pia->type = action->type; + printf("Indirect action #%u creation queued\n", pia->id); return 0; } -/** Update age action context by port_flow pointer. */ -void -update_age_action_context(const struct rte_flow_action *actions, - struct port_flow *pf) +/** Enqueue indirect action destroy operation. */ +int +port_queue_action_handle_destroy(portid_t port_id, + uint32_t queue_id, bool postpone, + uint32_t n, const uint32_t *actions) { - struct rte_flow_action_age *age = NULL; + const struct rte_flow_op_attr attr = { .postpone = postpone}; + struct rte_port *port; + struct port_indirect_action **tmp; + uint32_t c = 0; + int ret = 0; - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - switch (actions->type) { - case RTE_FLOW_ACTION_TYPE_AGE: - age = (struct rte_flow_action_age *) - (uintptr_t)actions->conf; - age->context = pf; - return; - default: + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; + } + + tmp = &port->actions_list; + while (*tmp) { + uint32_t i; + + for (i = 0; i != n; ++i) { + struct rte_flow_error error; + struct port_indirect_action *pia = *tmp; + + if (actions[i] != pia->id) + continue; + /* + * Poisoning to make sure PMDs update it in case + * of error. + */ + memset(&error, 0x99, sizeof(error)); + + if (pia->handle && + rte_flow_async_action_handle_destroy(port_id, + queue_id, &attr, pia->handle, NULL, &error)) { + ret = port_flow_complain(&error); + continue; + } + *tmp = pia->next; + printf("Indirect action #%u destruction queued\n", + pia->id); + free(pia); break; } + if (i == n) + tmp = &(*tmp)->next; + ++c; + } + return ret; +} + +/** Enqueue indirect action update operation. */ +int +port_queue_action_handle_update(portid_t port_id, + uint32_t queue_id, bool postpone, uint32_t id, + const struct rte_flow_action *action) +{ + const struct rte_flow_op_attr attr = { .postpone = postpone}; + struct rte_port *port; + struct rte_flow_error error; + struct rte_flow_action_handle *action_handle; + + action_handle = port_action_handle_get_by_id(port_id, id); + if (!action_handle) + return -EINVAL; + + port = &ports[port_id]; + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; + } + + if (rte_flow_async_action_handle_update(port_id, queue_id, &attr, + action_handle, action, NULL, &error)) { + return port_flow_complain(&error); + } + printf("Indirect action #%u update queued\n", id); + return 0; +} + +/** Push all the queue operations in the queue to the NIC. 
*/ +int +port_queue_flow_push(portid_t port_id, queueid_t queue_id) +{ + struct rte_port *port; + struct rte_flow_error error; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; + } + + memset(&error, 0x55, sizeof(error)); + ret = rte_flow_push(port_id, queue_id, &error); + if (ret < 0) { + printf("Failed to push operations in the queue\n"); + return -EINVAL; + } + printf("Queue #%u operations pushed\n", queue_id); + return ret; +} + +/** Pull queue operation results from the queue. */ +int +port_queue_flow_pull(portid_t port_id, queueid_t queue_id) +{ + struct rte_port *port; + struct rte_flow_op_result *res; + struct rte_flow_error error; + int ret = 0; + int success = 0; + int i; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + + if (queue_id >= port->queue_nb) { + printf("Queue #%u is invalid\n", queue_id); + return -EINVAL; + } + + res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result)); + if (!res) { + printf("Failed to allocate memory for pulled results\n"); + return -ENOMEM; + } + + memset(&error, 0x66, sizeof(error)); + ret = rte_flow_pull(port_id, queue_id, res, + port->queue_sz, &error); + if (ret < 0) { + printf("Failed to pull a operation results\n"); + free(res); + return -EINVAL; + } + + for (i = 0; i < ret; i++) { + if (res[i].status == RTE_FLOW_OP_SUCCESS) + success++; } + printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n", + queue_id, ret, ret - success, success); + free(res); + return ret; } /** Create flow rule. */ @@ -1687,31 +2900,50 @@ int port_flow_create(portid_t port_id, const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, - const struct rte_flow_action *actions) + const struct rte_flow_action *actions, + const struct tunnel_ops *tunnel_ops) { struct rte_flow *flow; struct rte_port *port; struct port_flow *pf; uint32_t id = 0; struct rte_flow_error error; + struct port_flow_tunnel *pft = NULL; + struct rte_flow_action_age *age = age_action_get(actions); port = &ports[port_id]; if (port->flow_list) { if (port->flow_list->id == UINT32_MAX) { - printf("Highest rule ID is already assigned, delete" - " it first"); + fprintf(stderr, + "Highest rule ID is already assigned, delete it first"); return -ENOMEM; } id = port->flow_list->id + 1; } + if (tunnel_ops->enabled) { + pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, + actions, tunnel_ops); + if (!pft) + return -ENOENT; + if (pft->items) + pattern = pft->items; + if (pft->actions) + actions = pft->actions; + } pf = port_flow_new(attr, pattern, actions, &error); if (!pf) return port_flow_complain(&error); - update_age_action_context(actions, pf); + if (age) { + pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; + age->context = &pf->age_type; + } /* Poisoning to make sure PMDs update it in case of error. 
*/ memset(&error, 0x22, sizeof(error)); flow = rte_flow_create(port_id, attr, pattern, actions, &error); if (!flow) { + if (tunnel_ops->enabled) + port_flow_tunnel_offload_cmd_release(port_id, + tunnel_ops, pft); free(pf); return port_flow_complain(&error); } @@ -1719,6 +2951,8 @@ port_flow_create(portid_t port_id, pf->id = id; pf->flow = flow; port->flow_list = pf; + if (tunnel_ops->enabled) + port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); printf("Flow rule #%u created\n", pf->id); return 0; } @@ -1772,19 +3006,22 @@ int port_flow_flush(portid_t port_id) { struct rte_flow_error error; - struct rte_port *port = &ports[port_id]; + struct rte_port *port; int ret = 0; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + port = &ports[port_id]; + if (port->flow_list == NULL) return ret; /* Poisoning to make sure PMDs update it in case of error. */ memset(&error, 0x44, sizeof(error)); if (rte_flow_flush(port_id, &error)) { - ret = port_flow_complain(&error); - if (port_id_is_invalid(port_id, DISABLED_WARN) || - port_id == (portid_t)RTE_PORT_ALL) - return ret; + port_flow_complain(&error); } while (port->flow_list) { @@ -1796,26 +3033,58 @@ port_flow_flush(portid_t port_id) return ret; } -/** Dump all flow rules. */ +/** Dump flow rules. */ int -port_flow_dump(portid_t port_id, const char *file_name) +port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, + const char *file_name) { int ret = 0; FILE *file = stdout; struct rte_flow_error error; + struct rte_port *port; + struct port_flow *pflow; + struct rte_flow *tmpFlow = NULL; + bool found = false; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + if (!dump_all) { + port = &ports[port_id]; + pflow = port->flow_list; + while (pflow) { + if (rule_id != pflow->id) { + pflow = pflow->next; + } else { + tmpFlow = pflow->flow; + if (tmpFlow) + found = true; + break; + } + } + if (found == false) { + fprintf(stderr, "Failed to dump to flow %d\n", rule_id); + return -EINVAL; + } + } if (file_name && strlen(file_name)) { file = fopen(file_name, "w"); if (!file) { - printf("Failed to create file %s: %s\n", file_name, - strerror(errno)); + fprintf(stderr, "Failed to create file %s: %s\n", + file_name, strerror(errno)); return -errno; } } - ret = rte_flow_dev_dump(port_id, file, &error); + + if (!dump_all) + ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); + else + ret = rte_flow_dev_dump(port_id, NULL, file, &error); if (ret) { port_flow_complain(&error); - printf("Failed to dump flow: %s\n", strerror(-ret)); + fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); } else printf("Flow dump finished\n"); if (file_name && strlen(file_name)) @@ -1847,7 +3116,7 @@ port_flow_query(portid_t port_id, uint32_t rule, if (pf->id == rule) break; if (!pf) { - printf("Flow rule #%u not found\n", rule); + fprintf(stderr, "Flow rule #%u not found\n", rule); return -ENOENT; } ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, @@ -1861,7 +3130,7 @@ port_flow_query(portid_t port_id, uint32_t rule, case RTE_FLOW_ACTION_TYPE_AGE: break; default: - printf("Cannot query action type %d (%s)\n", + fprintf(stderr, "Cannot query action type %d (%s)\n", action->type, name); return -ENOTSUP; } @@ -1897,8 +3166,9 @@ port_flow_query(portid_t port_id, uint32_t rule, query.age.sec_since_last_hit); break; default: - printf("Cannot display result for action type %d (%s)\n", - action->type, name); + fprintf(stderr, + "Cannot 
display result for action type %d (%s)\n", + action->type, name); break; } return 0; @@ -1911,7 +3181,11 @@ port_flow_aged(portid_t port_id, uint8_t destroy) void **contexts; int nb_context, total = 0, idx; struct rte_flow_error error; - struct port_flow *pf; + enum age_action_context_type *type; + union { + struct port_flow *pf; + struct port_indirect_action *pia; + } ctx; if (port_id_is_invalid(port_id, ENABLED_WARN) || port_id == (portid_t)RTE_PORT_ALL) @@ -1926,48 +3200,55 @@ port_flow_aged(portid_t port_id, uint8_t destroy) return; contexts = malloc(sizeof(void *) * total); if (contexts == NULL) { - printf("Cannot allocate contexts for aged flow\n"); + fprintf(stderr, "Cannot allocate contexts for aged flow\n"); return; } - printf("ID\tGroup\tPrio\tAttr\n"); + printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); if (nb_context != total) { - printf("Port:%d get aged flows count(%d) != total(%d)\n", + fprintf(stderr, + "Port:%d get aged flows count(%d) != total(%d)\n", port_id, nb_context, total); free(contexts); return; } + total = 0; for (idx = 0; idx < nb_context; idx++) { - pf = (struct port_flow *)contexts[idx]; - if (!pf) { - printf("Error: get Null context in port %u\n", port_id); + if (!contexts[idx]) { + fprintf(stderr, "Error: get Null context in port %u\n", + port_id); continue; } - printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n", - pf->id, - pf->rule.attr->group, - pf->rule.attr->priority, - pf->rule.attr->ingress ? 'i' : '-', - pf->rule.attr->egress ? 'e' : '-', - pf->rule.attr->transfer ? 't' : '-'); - } - if (destroy) { - int ret; - uint32_t flow_id; - - total = 0; - printf("\n"); - for (idx = 0; idx < nb_context; idx++) { - pf = (struct port_flow *)contexts[idx]; - if (!pf) - continue; - flow_id = pf->id; - ret = port_flow_destroy(port_id, 1, &flow_id); - if (!ret) + type = (enum age_action_context_type *)contexts[idx]; + switch (*type) { + case ACTION_AGE_CONTEXT_TYPE_FLOW: + ctx.pf = container_of(type, struct port_flow, age_type); + printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 + "\t%c%c%c\t\n", + "Flow", + ctx.pf->id, + ctx.pf->rule.attr->group, + ctx.pf->rule.attr->priority, + ctx.pf->rule.attr->ingress ? 'i' : '-', + ctx.pf->rule.attr->egress ? 'e' : '-', + ctx.pf->rule.attr->transfer ? 't' : '-'); + if (destroy && !port_flow_destroy(port_id, 1, + &ctx.pf->id)) total++; + break; + case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: + ctx.pia = container_of(type, + struct port_indirect_action, age_type); + printf("%-20s\t%" PRIu32 "\n", "Indirect action", + ctx.pia->id); + break; + default: + fprintf(stderr, "Error: invalid context type %u\n", + port_id); + break; } - printf("%d flows be destroyed\n", total); } + printf("\n%d flows destroyed\n", total); free(contexts); } @@ -2028,7 +3309,9 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) pf->rule.attr->egress ? 'e' : '-', pf->rule.attr->transfer ? 
't' : '-'); while (item->type != RTE_FLOW_ITEM_TYPE_END) { - if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, + if ((uint32_t)item->type > INT_MAX) + name = "PMD_INTERNAL"; + else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &name, sizeof(name), (void *)(uintptr_t)item->type, NULL) <= 0) @@ -2039,7 +3322,9 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) } printf("=>"); while (action->type != RTE_FLOW_ACTION_TYPE_END) { - if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, + if ((uint32_t)action->type > INT_MAX) + name = "PMD_INTERNAL"; + else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, &name, sizeof(name), (void *)(uintptr_t)action->type, NULL) <= 0) @@ -2076,7 +3361,8 @@ rx_queue_id_is_invalid(queueid_t rxq_id) { if (rxq_id < nb_rxq) return 0; - printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); + fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", + rxq_id, nb_rxq); return 1; } @@ -2085,7 +3371,8 @@ tx_queue_id_is_invalid(queueid_t txq_id) { if (txq_id < nb_txq) return 0; - printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); + fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", + txq_id, nb_txq); return 1; } @@ -2166,8 +3453,8 @@ rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) if (rxdesc_id < ring_size) return 0; - printf("Invalid RX descriptor %u (must be < ring_size=%u)\n", - rxdesc_id, ring_size); + fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", + rxdesc_id, ring_size); return 1; } @@ -2184,8 +3471,8 @@ tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) if (txdesc_id < ring_size) return 0; - printf("Invalid TX descriptor %u (must be < ring_size=%u)\n", - txdesc_id, ring_size); + fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", + txdesc_id, ring_size); return 1; } @@ -2199,9 +3486,9 @@ ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) port_id, q_id, ring_name); mz = rte_memzone_lookup(mz_name); if (mz == NULL) - printf("%s ring memory zoneof (port %d, queue %d) not" - "found (zone name = %s\n", - ring_name, port_id, q_id, mz_name); + fprintf(stderr, + "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", + ring_name, port_id, q_id, mz_name); return mz; } @@ -2356,8 +3643,8 @@ rxtx_config_display(void) nb_fwd_lcores, nb_fwd_ports); RTE_ETH_FOREACH_DEV(pid) { - struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; - struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; + struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf; + struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf; uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; struct rte_eth_rxq_info rx_qinfo; @@ -2408,7 +3695,12 @@ rxtx_config_display(void) printf(" RX threshold registers: pthresh=%d hthresh=%d " " wthresh=%d\n", pthresh_tmp, hthresh_tmp, wthresh_tmp); - printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp); + printf(" RX Offloads=0x%"PRIx64, offloads_tmp); + if (rx_conf->share_group > 0) + printf(" share_group=%u share_qid=%u", + rx_conf->share_group, + rx_conf->share_qid); + printf("\n"); } /* per tx queue config only for first queue to be less verbose */ @@ -2459,13 +3751,15 @@ port_rss_reta_info(portid_t port_id, ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); if (ret != 0) { - printf("Failed to get RSS RETA info, return code = %d\n", ret); + fprintf(stderr, + "Failed to get RSS RETA info, 
return code = %d\n", + ret); return; } for (i = 0; i < nb_entries; i++) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; if (!(reta_conf[idx].mask & (1ULL << shift))) continue; printf("RSS RETA configuration: hash index=%u, queue=%u\n", @@ -2474,7 +3768,7 @@ port_rss_reta_info(portid_t port_id, } /* - * Displays the RSS hash functions of a port, and, optionaly, the RSS hash + * Displays the RSS hash functions of a port, and, optionally, the RSS hash * key of the port. */ void @@ -2500,7 +3794,8 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) dev_info.hash_key_size <= sizeof(rss_key)) hash_key_size = dev_info.hash_key_size; else { - printf("dev_info did not provide a valid hash key size\n"); + fprintf(stderr, + "dev_info did not provide a valid hash key size\n"); return; } @@ -2511,13 +3806,13 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) if (diag != 0) { switch (diag) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; default: - printf("operation failed - diag=%d\n", diag); + fprintf(stderr, "operation failed - diag=%d\n", diag); break; } return; @@ -2529,7 +3824,9 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) } printf("RSS functions:\n "); for (i = 0; rss_type_table[i].str; i++) { - if (rss_hf & rss_type_table[i].rss_type) + if (rss_type_table[i].rss_type == 0) + continue; + if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) printf("%s ", rss_type_table[i].str); } printf("\n"); @@ -2543,14 +3840,14 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) void port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, - uint hash_key_len) + uint8_t hash_key_len) { struct rte_eth_rss_conf rss_conf; int diag; unsigned int i; rss_conf.rss_key = NULL; - rss_conf.rss_key_len = hash_key_len; + rss_conf.rss_key_len = 0; rss_conf.rss_hf = 0; for (i = 0; rss_type_table[i].str; i++) { if (!strcmp(rss_type_table[i].str, rss_type)) @@ -2559,22 +3856,128 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); if (diag == 0) { rss_conf.rss_key = hash_key; + rss_conf.rss_key_len = hash_key_len; diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); } - if (diag == 0) - return; + if (diag == 0) + return; + + switch (diag) { + case -ENODEV: + fprintf(stderr, "port index %d invalid\n", port_id); + break; + case -ENOTSUP: + fprintf(stderr, "operation not supported by device\n"); + break; + default: + fprintf(stderr, "operation failed - diag=%d\n", diag); + break; + } +} + +/* + * Check whether a shared rxq scheduled on other lcores. + */ +static bool +fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, + portid_t src_port, queueid_t src_rxq, + uint32_t share_group, queueid_t share_rxq) +{ + streamid_t sm_id; + streamid_t nb_fs_per_lcore; + lcoreid_t nb_fc; + lcoreid_t lc_id; + struct fwd_stream *fs; + struct rte_port *port; + struct rte_eth_dev_info *dev_info; + struct rte_eth_rxconf *rxq_conf; + + nb_fc = cur_fwd_config.nb_fwd_lcores; + /* Check remaining cores. 
*/ + for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { + sm_id = fwd_lcores[lc_id]->stream_idx; + nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; + for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; + sm_id++) { + fs = fwd_streams[sm_id]; + port = &ports[fs->rx_port]; + dev_info = &port->dev_info; + rxq_conf = &port->rxq[fs->rx_queue].conf; + if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) + == 0 || rxq_conf->share_group == 0) + /* Not shared rxq. */ + continue; + if (domain_id != port->dev_info.switch_info.domain_id) + continue; + if (rxq_conf->share_group != share_group) + continue; + if (rxq_conf->share_qid != share_rxq) + continue; + printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", + share_group, share_rxq); + printf(" lcore %hhu Port %hu queue %hu\n", + src_lc, src_port, src_rxq); + printf(" lcore %hhu Port %hu queue %hu\n", + lc_id, fs->rx_port, fs->rx_queue); + printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", + nb_rxq); + return true; + } + } + return false; +} - switch (diag) { - case -ENODEV: - printf("port index %d invalid\n", port_id); - break; - case -ENOTSUP: - printf("operation not supported by device\n"); - break; - default: - printf("operation failed - diag=%d\n", diag); - break; +/* + * Check shared rxq configuration. + * + * Shared group must not being scheduled on different core. + */ +bool +pkt_fwd_shared_rxq_check(void) +{ + streamid_t sm_id; + streamid_t nb_fs_per_lcore; + lcoreid_t nb_fc; + lcoreid_t lc_id; + struct fwd_stream *fs; + uint16_t domain_id; + struct rte_port *port; + struct rte_eth_dev_info *dev_info; + struct rte_eth_rxconf *rxq_conf; + + if (rxq_share == 0) + return true; + nb_fc = cur_fwd_config.nb_fwd_lcores; + /* + * Check streams on each core, make sure the same switch domain + + * group + queue doesn't get scheduled on other cores. + */ + for (lc_id = 0; lc_id < nb_fc; lc_id++) { + sm_id = fwd_lcores[lc_id]->stream_idx; + nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; + for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; + sm_id++) { + fs = fwd_streams[sm_id]; + /* Update lcore info stream being scheduled. */ + fs->lcore = fwd_lcores[lc_id]; + port = &ports[fs->rx_port]; + dev_info = &port->dev_info; + rxq_conf = &port->rxq[fs->rx_queue].conf; + if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) + == 0 || rxq_conf->share_group == 0) + /* Not shared rxq. */ + continue; + /* Check shared rxq not scheduled on remaining cores. */ + domain_id = port->dev_info.switch_info.domain_id; + if (fwd_stream_on_other_lcores(domain_id, lc_id, + fs->rx_port, + fs->rx_queue, + rxq_conf->share_group, + rxq_conf->share_qid)) + return false; + } } + return true; } /* @@ -2634,10 +4037,8 @@ fwd_topology_tx_port_get(portid_t rxp) if (rxp + 1 < cur_fwd_config.nb_fwd_ports) return rxp + 1; if (warning_once) { - printf("\nWarning! port-topology=paired" - " and odd forward ports number," - " the last port will pair with" - " itself.\n\n"); + fprintf(stderr, + "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); warning_once = 0; } return rxp; @@ -2696,6 +4097,8 @@ rss_fwd_config_setup(void) queueid_t rxq; queueid_t nb_q; streamid_t sm_id; + int start; + int end; nb_q = nb_rxq; if (nb_q > nb_txq) @@ -2713,7 +4116,21 @@ rss_fwd_config_setup(void) init_fwd_streams(); setup_fwd_config_of_each_lcore(&cur_fwd_config); - rxp = 0; rxq = 0; + + if (proc_id > 0 && nb_q % num_procs != 0) + printf("Warning! 
queue numbers should be multiple of processes, or packet loss will happen.\n"); + + /** + * In multi-process, All queues are allocated to different + * processes based on num_procs and proc_id. For example: + * if supports 4 queues(nb_q), 2 processes(num_procs), + * the 0~1 queue for primary process. + * the 2~3 queue for secondary process. + */ + start = proc_id * nb_q / num_procs; + end = start + nb_q / num_procs; + rxp = 0; + rxq = start; for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { struct fwd_stream *fs; @@ -2730,7 +4147,24 @@ rss_fwd_config_setup(void) continue; rxp = 0; rxq++; + if (rxq >= end) + rxq = start; + } +} + +static uint16_t +get_fwd_port_total_tc_num(void) +{ + struct rte_eth_dcb_info dcb_info; + uint16_t total_tc_num = 0; + unsigned int i; + + for (i = 0; i < nb_fwd_ports; i++) { + (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); + total_tc_num += dcb_info.nb_tcs; } + + return total_tc_num; } /** @@ -2752,12 +4186,43 @@ dcb_fwd_config_setup(void) lcoreid_t lc_id; uint16_t nb_rx_queue, nb_tx_queue; uint16_t i, j, k, sm_id = 0; + uint16_t total_tc_num; + struct rte_port *port; uint8_t tc = 0; + portid_t pid; + int ret; + + /* + * The fwd_config_setup() is called when the port is RTE_PORT_STARTED + * or RTE_PORT_STOPPED. + * + * Re-configure ports to get updated mapping between tc and queue in + * case the queue number of the port is changed. Skip for started ports + * since modifying queue number and calling dev_configure need to stop + * ports first. + */ + for (pid = 0; pid < nb_fwd_ports; pid++) { + if (port_is_started(pid) == 1) + continue; + + port = &ports[pid]; + ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, + &port->dev_conf); + if (ret < 0) { + fprintf(stderr, + "Failed to re-configure port %d, ret = %d.\n", + pid, ret); + return; + } + } cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; cur_fwd_config.nb_fwd_ports = nb_fwd_ports; cur_fwd_config.nb_fwd_streams = (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); + total_tc_num = get_fwd_port_total_tc_num(); + if (cur_fwd_config.nb_fwd_lcores > total_tc_num) + cur_fwd_config.nb_fwd_lcores = total_tc_num; /* reinitialize forwarding streams */ init_fwd_streams(); @@ -2770,7 +4235,7 @@ dcb_fwd_config_setup(void) for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { fwd_lcores[lc_id]->stream_nb = 0; fwd_lcores[lc_id]->stream_idx = sm_id; - for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { + for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { /* if the nb_queue is zero, means this tc is * not enabled on the POOL */ @@ -2879,6 +4344,10 @@ icmp_echo_config_setup(void) void fwd_config_setup(void) { + struct rte_port *port; + portid_t pt_id; + unsigned int i; + cur_fwd_config.fwd_eng = cur_fwd_eng; if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { icmp_echo_config_setup(); @@ -2886,9 +4355,24 @@ fwd_config_setup(void) } if ((nb_rxq > 1) && (nb_txq > 1)){ - if (dcb_config) + if (dcb_config) { + for (i = 0; i < nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + if (!port->dcb_flag) { + fprintf(stderr, + "In DCB mode, all forwarding ports must be configured in this mode.\n"); + return; + } + } + if (nb_fwd_lcores == 1) { + fprintf(stderr, + "In DCB mode,the nb forwarding cores should be larger than 1.\n"); + return; + } + dcb_fwd_config_setup(); - else + } else rss_fwd_config_setup(); } else @@ -2959,11 +4443,12 @@ set_fwd_eth_peer(portid_t port_id, char *peer_addr) { struct rte_ether_addr new_peer_addr; if (!rte_eth_dev_is_valid_port(port_id)) { - 
printf("Error: Invalid port number %i\n", port_id); + fprintf(stderr, "Error: Invalid port number %i\n", port_id); return; } if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { - printf("Error: Invalid ethernet address: %s\n", peer_addr); + fprintf(stderr, "Error: Invalid ethernet address: %s\n", + peer_addr); return; } peer_eth_addrs[port_id] = new_peer_addr; @@ -2981,14 +4466,13 @@ set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) for (i = 0; i < nb_lc; i++) { lcore_cpuid = lcorelist[i]; if (! rte_lcore_is_enabled(lcore_cpuid)) { - printf("lcore %u not enabled\n", lcore_cpuid); + fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); return -1; } - if (lcore_cpuid == rte_get_master_lcore()) { - printf("lcore %u cannot be masked on for running " - "packet forwarding, which is the master lcore " - "and reserved for command line parsing only\n", - lcore_cpuid); + if (lcore_cpuid == rte_get_main_lcore()) { + fprintf(stderr, + "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", + lcore_cpuid); return -1; } if (record_now) @@ -3017,7 +4501,7 @@ set_fwd_lcores_mask(uint64_t lcoremask) unsigned int i; if (lcoremask == 0) { - printf("Invalid NULL mask of cores\n"); + fprintf(stderr, "Invalid NULL mask of cores\n"); return -1; } nb_lc = 0; @@ -3032,10 +4516,14 @@ set_fwd_lcores_mask(uint64_t lcoremask) void set_fwd_lcores_number(uint16_t nb_lc) { + if (test_done == 0) { + fprintf(stderr, "Please stop forwarding first\n"); + return; + } if (nb_lc > nb_cfg_lcores) { - printf("nb fwd cores %u > %u (max. number of configured " - "lcores) - ignored\n", - (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); + fprintf(stderr, + "nb fwd cores %u > %u (max. number of configured lcores) - ignored\n", + (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); return; } nb_fwd_lcores = (lcoreid_t) nb_lc; @@ -3186,7 +4674,7 @@ set_fwd_ports_mask(uint64_t portmask) unsigned int i; if (portmask == 0) { - printf("Invalid NULL mask of ports\n"); + fprintf(stderr, "Invalid NULL mask of ports\n"); return; } nb_pt = 0; @@ -3202,9 +4690,9 @@ void set_fwd_ports_number(uint16_t nb_pt) { if (nb_pt > nb_cfg_ports) { - printf("nb fwd ports %u > %u (number of configured " - "ports) - ignored\n", - (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); + fprintf(stderr, + "nb fwd ports %u > %u (number of configured ports) - ignored\n", + (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); return; } nb_fwd_ports = (portid_t) nb_pt; @@ -3232,9 +4720,9 @@ void set_nb_pkt_per_burst(uint16_t nb) { if (nb > MAX_PKT_BURST) { - printf("nb pkt per burst: %u > %u (maximum packet per burst) " - " ignored\n", - (unsigned int) nb, (unsigned int) MAX_PKT_BURST); + fprintf(stderr, + "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", + (unsigned int) nb, (unsigned int) MAX_PKT_BURST); return; } nb_pkt_per_burst = nb; @@ -3265,17 +4753,18 @@ set_tx_pkt_split(const char *name) return; } } - printf("unknown value: \"%s\"\n", name); + fprintf(stderr, "unknown value: \"%s\"\n", name); } int -parse_fec_mode(const char *name, uint32_t *mode) +parse_fec_mode(const char *name, uint32_t *fec_capa) { uint8_t i; for (i = 0; i < RTE_DIM(fec_mode_name); i++) { if (strcmp(fec_mode_name[i].name, name) == 0) { - *mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); + *fec_capa = + RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); return 0; } } @@ -3293,7 +4782,7 @@ show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) 
printf("%s : ", rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); - for (j = RTE_ETH_FEC_AUTO; j < RTE_DIM(fec_mode_name); j++) { + for (j = 0; j < RTE_DIM(fec_mode_name); j++) { if (RTE_ETH_FEC_MODE_TO_CAPA(j) & speed_fec_capa[i].capa) printf("%s ", fec_mode_name[j].name); @@ -3302,6 +4791,94 @@ show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) } } +void +show_rx_pkt_offsets(void) +{ + uint32_t i, n; + + n = rx_pkt_nb_offs; + printf("Number of offsets: %u\n", n); + if (n) { + printf("Segment offsets: "); + for (i = 0; i != n - 1; i++) + printf("%hu,", rx_pkt_seg_offsets[i]); + printf("%hu\n", rx_pkt_seg_lengths[i]); + } +} + +void +set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) +{ + unsigned int i; + + if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { + printf("nb segments per RX packets=%u >= " + "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); + return; + } + + /* + * No extra check here, the segment length will be checked by PMD + * in the extended queue setup. + */ + for (i = 0; i < nb_offs; i++) { + if (seg_offsets[i] >= UINT16_MAX) { + printf("offset[%u]=%u > UINT16_MAX - give up\n", + i, seg_offsets[i]); + return; + } + } + + for (i = 0; i < nb_offs; i++) + rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; + + rx_pkt_nb_offs = (uint8_t) nb_offs; +} + +void +show_rx_pkt_segments(void) +{ + uint32_t i, n; + + n = rx_pkt_nb_segs; + printf("Number of segments: %u\n", n); + if (n) { + printf("Segment sizes: "); + for (i = 0; i != n - 1; i++) + printf("%hu,", rx_pkt_seg_lengths[i]); + printf("%hu\n", rx_pkt_seg_lengths[i]); + } +} + +void +set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) +{ + unsigned int i; + + if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { + printf("nb segments per RX packets=%u >= " + "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); + return; + } + + /* + * No extra check here, the segment length will be checked by PMD + * in the extended queue setup. + */ + for (i = 0; i < nb_segs; i++) { + if (seg_lengths[i] >= UINT16_MAX) { + printf("length[%u]=%u > UINT16_MAX - give up\n", + i, seg_lengths[i]); + return; + } + } + + for (i = 0; i < nb_segs; i++) + rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; + + rx_pkt_nb_segs = (uint8_t) nb_segs; +} + void show_tx_pkt_segments(void) { @@ -3330,13 +4907,15 @@ nb_segs_is_invalid(unsigned int nb_segs) RTE_ETH_FOREACH_DEV(port_id) { for (queue_id = 0; queue_id < nb_txq; queue_id++) { ret = get_tx_ring_size(port_id, queue_id, &ring_size); - - if (ret) - return true; - + if (ret) { + /* Port may not be initialized yet, can't say + * the port is invalid in this stage. + */ + continue; + } if (ring_size < nb_segs) { - printf("nb segments per TX packets=%u >= " - "TX queue(%u) ring_size=%u - ignored\n", + printf("nb segments per TX packets=%u >= TX " + "queue(%u) ring_size=%u - txpkts ignored\n", nb_segs, queue_id, ring_size); return true; } @@ -3347,32 +4926,49 @@ nb_segs_is_invalid(unsigned int nb_segs) } void -set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) +set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) { uint16_t tx_pkt_len; - unsigned i; + unsigned int i; + + /* + * For single segment settings failed check is ignored. + * It is a very basic capability to send the single segment + * packets, suppose it is always supported. 
+ */ + if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { + fprintf(stderr, + "Tx segment size(%u) is not supported - txpkts ignored\n", + nb_segs); + return; + } - if (nb_segs_is_invalid(nb_segs)) + if (nb_segs > RTE_MAX_SEGS_PER_PKT) { + fprintf(stderr, + "Tx segment size(%u) is bigger than max number of segment(%u)\n", + nb_segs, RTE_MAX_SEGS_PER_PKT); return; + } /* * Check that each segment length is greater or equal than - * the mbuf data sise. + * the mbuf data size. * Check also that the total packet length is greater or equal than the * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + * 20 + 8). */ tx_pkt_len = 0; for (i = 0; i < nb_segs; i++) { - if (seg_lengths[i] > (unsigned) mbuf_data_size) { - printf("length[%u]=%u > mbuf_data_size=%u - give up\n", - i, seg_lengths[i], (unsigned) mbuf_data_size); + if (seg_lengths[i] > mbuf_data_size[0]) { + fprintf(stderr, + "length[%u]=%u > mbuf_data_size=%u - give up\n", + i, seg_lengths[i], mbuf_data_size[0]); return; } tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); } if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { - printf("total packet length=%u < %d - give up\n", + fprintf(stderr, "total packet length=%u < %d - give up\n", (unsigned) tx_pkt_len, (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); return; @@ -3395,64 +4991,28 @@ show_tx_pkt_times(void) void set_tx_pkt_times(unsigned int *tx_times) { - uint16_t port_id; - int offload_found = 0; - int offset; - int flag; - - static const struct rte_mbuf_dynfield desc_offs = { - .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, - .size = sizeof(uint64_t), - .align = __alignof__(uint64_t), - }; - static const struct rte_mbuf_dynflag desc_flag = { - .name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, - }; - - RTE_ETH_FOREACH_DEV(port_id) { - struct rte_eth_dev_info dev_info = { 0 }; - int ret; - - ret = rte_eth_dev_info_get(port_id, &dev_info); - if (ret == 0 && dev_info.tx_offload_capa & - DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) { - offload_found = 1; - break; - } - } - if (!offload_found) { - printf("No device supporting Tx timestamp scheduling found, " - "dynamic flag and field not registered\n"); - return; - } - offset = rte_mbuf_dynfield_register(&desc_offs); - if (offset < 0 && rte_errno != EEXIST) - printf("Dynamic timestamp field registration error: %d", - rte_errno); - flag = rte_mbuf_dynflag_register(&desc_flag); - if (flag < 0 && rte_errno != EEXIST) - printf("Dynamic timestamp flag registration error: %d", - rte_errno); tx_pkt_times_inter = tx_times[0]; tx_pkt_times_intra = tx_times[1]; } +#ifdef RTE_LIB_GRO void setup_gro(const char *onoff, portid_t port_id) { if (!rte_eth_dev_is_valid_port(port_id)) { - printf("invalid port id %u\n", port_id); + fprintf(stderr, "invalid port id %u\n", port_id); return; } if (test_done == 0) { - printf("Before enable/disable GRO," - " please stop forwarding first\n"); + fprintf(stderr, + "Before enable/disable GRO, please stop forwarding first\n"); return; } if (strcmp(onoff, "on") == 0) { if (gro_ports[port_id].enable != 0) { - printf("Port %u has enabled GRO. Please" - " disable GRO first\n", port_id); + fprintf(stderr, + "Port %u has enabled GRO. 
Please disable GRO first\n", + port_id); return; } if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { @@ -3465,7 +5025,7 @@ setup_gro(const char *onoff, portid_t port_id) gro_ports[port_id].enable = 1; } else { if (gro_ports[port_id].enable == 0) { - printf("Port %u has disabled GRO\n", port_id); + fprintf(stderr, "Port %u has disabled GRO\n", port_id); return; } gro_ports[port_id].enable = 0; @@ -3476,18 +5036,16 @@ void setup_gro_flush_cycles(uint8_t cycles) { if (test_done == 0) { - printf("Before change flush interval for GRO," - " please stop forwarding first.\n"); + fprintf(stderr, + "Before change flush interval for GRO, please stop forwarding first.\n"); return; } if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < GRO_DEFAULT_FLUSH_CYCLES) { - printf("The flushing cycle be in the range" - " of 1 to %u. Revert to the default" - " value %u.\n", - GRO_MAX_FLUSH_CYCLES, - GRO_DEFAULT_FLUSH_CYCLES); + fprintf(stderr, + "The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n", + GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); cycles = GRO_DEFAULT_FLUSH_CYCLES; } @@ -3503,7 +5061,7 @@ show_gro(portid_t port_id) param = &gro_ports[port_id].param; if (!rte_eth_dev_is_valid_port(port_id)) { - printf("Invalid port id %u.\n", port_id); + fprintf(stderr, "Invalid port id %u.\n", port_id); return; } if (gro_ports[port_id].enable) { @@ -3519,30 +5077,33 @@ show_gro(portid_t port_id) } else printf("Port %u doesn't enable GRO.\n", port_id); } +#endif /* RTE_LIB_GRO */ +#ifdef RTE_LIB_GSO void setup_gso(const char *mode, portid_t port_id) { if (!rte_eth_dev_is_valid_port(port_id)) { - printf("invalid port id %u\n", port_id); + fprintf(stderr, "invalid port id %u\n", port_id); return; } if (strcmp(mode, "on") == 0) { if (test_done == 0) { - printf("before enabling GSO," - " please stop forwarding first\n"); + fprintf(stderr, + "before enabling GSO, please stop forwarding first\n"); return; } gso_ports[port_id].enable = 1; } else if (strcmp(mode, "off") == 0) { if (test_done == 0) { - printf("before disabling GSO," - " please stop forwarding first\n"); + fprintf(stderr, + "before disabling GSO, please stop forwarding first\n"); return; } gso_ports[port_id].enable = 0; } } +#endif /* RTE_LIB_GSO */ char* list_pkt_forwarding_modes(void) @@ -3607,7 +5168,7 @@ set_pkt_forwarding_mode(const char *fwd_mode_name) } i++; } - printf("Invalid %s packet forwarding mode\n", fwd_mode_name); + fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); } void @@ -3741,17 +5302,18 @@ vlan_extend_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; } else { - vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; + vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) { - printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " - "diag=%d\n", port_id, on, diag); + fprintf(stderr, + "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", + port_id, on, diag); return; } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; @@ -3770,17 +5332,18 @@ rx_vlan_strip_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= 
ETH_VLAN_STRIP_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } else { - vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) { - printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " - "diag=%d\n", port_id, on, diag); + fprintf(stderr, + "%s(port_pi=%d, on=%d) failed diag=%d\n", + __func__, port_id, on, diag); return; } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; @@ -3796,8 +5359,9 @@ rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); if (diag < 0) - printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " - "diag=%d\n", port_id, queue_id, on, diag); + fprintf(stderr, + "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", + __func__, port_id, queue_id, on, diag); } void @@ -3813,17 +5377,18 @@ rx_vlan_filter_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; } else { - vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) { - printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " - "diag=%d\n", port_id, on, diag); + fprintf(stderr, + "%s(port_pi=%d, on=%d) failed diag=%d\n", + __func__, port_id, on, diag); return; } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; @@ -3842,17 +5407,17 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; + vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; } else { - vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; + vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) { - printf("%s(port_pi=%d, on=%d) failed " - "diag=%d\n", __func__, port_id, on, diag); + fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", + __func__, port_id, on, diag); return; } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; @@ -3870,9 +5435,9 @@ rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); if (diag == 0) return 0; - printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " - "diag=%d\n", - port_id, vlan_id, on, diag); + fprintf(stderr, + "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", + port_id, vlan_id, on, diag); return -1; } @@ -3901,9 +5466,9 @@ vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) if (diag == 0) return; - printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " - "diag=%d\n", - port_id, vlan_type, tp_id, diag); + fprintf(stderr, + 
"tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", + port_id, vlan_type, tp_id, diag); } void @@ -3916,8 +5481,8 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id) return; if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_QINQ_INSERT) { - printf("Error, as QinQ has been enabled.\n"); + RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { + fprintf(stderr, "Error, as QinQ has been enabled.\n"); return; } @@ -3925,14 +5490,15 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id) if (ret != 0) return; - if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { - printf("Error: vlan insert is not supported by port %d\n", + if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { + fprintf(stderr, + "Error: vlan insert is not supported by port %d\n", port_id); return; } tx_vlan_reset(port_id); - ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; + ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; ports[port_id].tx_vlan_id = vlan_id; } @@ -3951,15 +5517,16 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) if (ret != 0) return; - if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { - printf("Error: qinq insert not supported by port %d\n", + if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { + fprintf(stderr, + "Error: qinq insert not supported by port %d\n", port_id); return; } tx_vlan_reset(port_id); - ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT); + ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT); ports[port_id].tx_vlan_id = vlan_id; ports[port_id].tx_vlan_id_outer = vlan_id_outer; } @@ -3968,8 +5535,8 @@ void tx_vlan_reset(portid_t port_id) { ports[port_id].dev_conf.txmode.offloads &= - ~(DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT); + ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT); ports[port_id].tx_vlan_id = 0; ports[port_id].tx_vlan_id_outer = 0; } @@ -3986,8 +5553,7 @@ tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) { - uint16_t i; - uint8_t existing_mapping_found = 0; + int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; @@ -3996,41 +5562,26 @@ set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) return; if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { - printf("map_value not in required range 0..%d\n", - RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + fprintf(stderr, "map_value not in required range 0..%d\n", + RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); return; } - if (!is_rx) { /*then tx*/ - for (i = 0; i < nb_tx_queue_stats_mappings; i++) { - if ((tx_queue_stats_mappings[i].port_id == port_id) && - (tx_queue_stats_mappings[i].queue_id == queue_id)) { - tx_queue_stats_mappings[i].stats_counter_id = map_value; - existing_mapping_found = 1; - break; - } - } - if (!existing_mapping_found) { /* A new additional mapping... 
*/ - tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; - tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; - tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; - nb_tx_queue_stats_mappings++; - } - } - else { /*rx*/ - for (i = 0; i < nb_rx_queue_stats_mappings; i++) { - if ((rx_queue_stats_mappings[i].port_id == port_id) && - (rx_queue_stats_mappings[i].queue_id == queue_id)) { - rx_queue_stats_mappings[i].stats_counter_id = map_value; - existing_mapping_found = 1; - break; - } + if (!is_rx) { /* tx */ + ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, + map_value); + if (ret) { + fprintf(stderr, + "failed to set tx queue stats mapping.\n"); + return; } - if (!existing_mapping_found) { /* A new additional mapping... */ - rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; - rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; - rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; - nb_rx_queue_stats_mappings++; + } else { /* rx */ + ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, + map_value); + if (ret) { + fprintf(stderr, + "failed to set rx queue stats mapping.\n"); + return; } } } @@ -4053,6 +5604,51 @@ set_record_burst_stats(uint8_t on_off) record_burst_stats = on_off; } +static char* +flowtype_to_str(uint16_t flow_type) +{ + struct flow_type_info { + char str[32]; + uint16_t ftype; + }; + + uint8_t i; + static struct flow_type_info flowtype_str_table[] = { + {"raw", RTE_ETH_FLOW_RAW}, + {"ipv4", RTE_ETH_FLOW_IPV4}, + {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, + {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, + {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, + {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, + {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, + {"ipv6", RTE_ETH_FLOW_IPV6}, + {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, + {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, + {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, + {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, + {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, + {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, + {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, + {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, + {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, + {"port", RTE_ETH_FLOW_PORT}, + {"vxlan", RTE_ETH_FLOW_VXLAN}, + {"geneve", RTE_ETH_FLOW_GENEVE}, + {"nvgre", RTE_ETH_FLOW_NVGRE}, + {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, + {"gtpu", RTE_ETH_FLOW_GTPU}, + }; + + for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { + if (flowtype_str_table[i].ftype == flow_type) + return flowtype_str_table[i].str; + } + + return NULL; +} + +#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) + static inline void print_fdir_mask(struct rte_eth_fdir_masks *mask) { @@ -4112,45 +5708,6 @@ print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) printf("\n"); } -static char * -flowtype_to_str(uint16_t flow_type) -{ - struct flow_type_info { - char str[32]; - uint16_t ftype; - }; - - uint8_t i; - static struct flow_type_info flowtype_str_table[] = { - {"raw", RTE_ETH_FLOW_RAW}, - {"ipv4", RTE_ETH_FLOW_IPV4}, - {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, - {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, - {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, - {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, - {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, - {"ipv6", RTE_ETH_FLOW_IPV6}, - {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, - {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, - {"ipv6-udp", 
RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, - {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, - {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, - {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, - {"port", RTE_ETH_FLOW_PORT}, - {"vxlan", RTE_ETH_FLOW_VXLAN}, - {"geneve", RTE_ETH_FLOW_GENEVE}, - {"nvgre", RTE_ETH_FLOW_NVGRE}, - {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, - }; - - for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { - if (flowtype_str_table[i].ftype == flow_type) - return flowtype_str_table[i].str; - } - - return NULL; -} - static inline void print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) { @@ -4190,25 +5747,16 @@ static int get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, struct rte_eth_fdir_stats *fdir_stat) { - int ret; - - ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); - if (!ret) { - rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, - RTE_ETH_FILTER_INFO, fdir_info); - rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, - RTE_ETH_FILTER_STATS, fdir_stat); - return 0; - } + int ret = -ENOTSUP; -#ifdef RTE_LIBRTE_I40E_PMD +#ifdef RTE_NET_I40E if (ret == -ENOTSUP) { ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); if (!ret) ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); } #endif -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE if (ret == -ENOTSUP) { ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); if (!ret) @@ -4219,11 +5767,11 @@ get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, case 0: break; case -ENOTSUP: - printf("\n FDIR is not supported on port %-2d\n", + fprintf(stderr, "\n FDIR is not supported on port %-2d\n", port_id); break; default: - printf("programming error: (%s)\n", strerror(-ret)); + fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); break; } return ret; @@ -4297,6 +5845,8 @@ fdir_get_infos(portid_t port_id) fdir_stats_border, fdir_stats_border); } +#endif /* RTE_NET_I40E || RTE_NET_IXGBE */ + void fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) { @@ -4317,8 +5867,9 @@ fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) idx = flex_conf->nb_flexmasks; flex_conf->nb_flexmasks++; } else { - printf("The flex mask table is full. Can not set flex" - " mask for flow_type(%u).", cfg->flow_type); + fprintf(stderr, + "The flex mask table is full. Can not set flex mask for flow_type(%u).", + cfg->flow_type); return; } } @@ -4347,8 +5898,9 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) idx = flex_conf->nb_payloads; flex_conf->nb_payloads++; } else { - printf("The flex payload table is full. Can not set" - " flex payload for type(%u).", cfg->type); + fprintf(stderr, + "The flex payload table is full. Can not set flex payload for type(%u).", + cfg->type); return; } } @@ -4361,7 +5913,7 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) { -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE int diag; if (is_rx) @@ -4371,12 +5923,13 @@ set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) if (diag == 0) return; - printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", - is_rx ? "rx" : "tx", port_id, diag); + fprintf(stderr, + "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", + is_rx ? "rx" : "tx", port_id, diag); return; #endif - printf("VF %s setting not supported for port %d\n", - is_rx ? 
"Rx" : "Tx", port_id); + fprintf(stderr, "VF %s setting not supported for port %d\n", + is_rx ? "Rx" : "Tx", port_id); RTE_SET_USED(vf); RTE_SET_USED(on); } @@ -4393,16 +5946,18 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) ret = eth_link_get_nowait_print_err(port_id, &link); if (ret < 0) return 1; - if (link.link_speed != ETH_SPEED_NUM_UNKNOWN && + if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && rate > link.link_speed) { - printf("Invalid rate value:%u bigger than link speed: %u\n", + fprintf(stderr, + "Invalid rate value:%u bigger than link speed: %u\n", rate, link.link_speed); return 1; } diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); if (diag == 0) return diag; - printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", + fprintf(stderr, + "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", port_id, diag); return diag; } @@ -4416,20 +5971,21 @@ set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) RTE_SET_USED(rate); RTE_SET_USED(q_msk); -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE if (diag == -ENOTSUP) diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk); #endif -#ifdef RTE_LIBRTE_BNXT_PMD +#ifdef RTE_NET_BNXT if (diag == -ENOTSUP) diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); #endif if (diag == 0) return diag; - printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", - port_id, diag); + fprintf(stderr, + "%s for port_id=%d failed diag=%d\n", + __func__, port_id, diag); return diag; } @@ -4476,8 +6032,9 @@ mcast_addr_pool_extend(struct rte_port *port) mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, mc_pool_size); if (mc_pool == NULL) { - printf("allocation of pool of %u multicast addresses failed\n", - port->mc_addr_nb + MCAST_POOL_INC); + fprintf(stderr, + "allocation of pool of %u multicast addresses failed\n", + port->mc_addr_nb + MCAST_POOL_INC); return -ENOMEM; } @@ -4500,7 +6057,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) { port->mc_addr_nb--; if (addr_idx == port->mc_addr_nb) { - /* No need to recompact the set of multicast addressses. */ + /* No need to recompact the set of multicast addresses. */ if (port->mc_addr_nb == 0) { /* free the pool of multicast addresses. */ free(port->mc_addr_pool); @@ -4513,6 +6070,25 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); } +int +mcast_addr_pool_destroy(portid_t port_id) +{ + struct rte_port *port; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + + if (port->mc_addr_nb != 0) { + /* free the pool of multicast addresses. */ + free(port->mc_addr_pool); + port->mc_addr_pool = NULL; + port->mc_addr_nb = 0; + } + return 0; +} + static int eth_port_multicast_addr_list_set(portid_t port_id) { @@ -4523,7 +6099,8 @@ eth_port_multicast_addr_list_set(portid_t port_id) diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, port->mc_addr_nb); if (diag < 0) - printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", + fprintf(stderr, + "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. 
diag=%d\n", port_id, port->mc_addr_nb, diag); return diag; @@ -4546,7 +6123,8 @@ mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) */ for (i = 0; i < port->mc_addr_nb; i++) { if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { - printf("multicast address already filtered by port\n"); + fprintf(stderr, + "multicast address already filtered by port\n"); return; } } @@ -4576,7 +6154,8 @@ mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) break; } if (i == port->mc_addr_nb) { - printf("multicast address not filtered by port %d\n", port_id); + fprintf(stderr, "multicast address not filtered by port %d\n", + port_id); return; } @@ -4599,7 +6178,7 @@ port_dcb_info_display(portid_t port_id) ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); if (ret) { - printf("\n Failed to get dcb infos on port %-2d\n", + fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", port_id); return; } @@ -4642,34 +6221,34 @@ open_file(const char *file_path, uint32_t *size) *size = 0; if (fd == -1) { - printf("%s: Failed to open %s\n", __func__, file_path); + fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); return buf; } if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { close(fd); - printf("%s: File operations failed\n", __func__); + fprintf(stderr, "%s: File operations failed\n", __func__); return buf; } pkg_size = st_buf.st_size; if (pkg_size < 0) { close(fd); - printf("%s: File operations failed\n", __func__); + fprintf(stderr, "%s: File operations failed\n", __func__); return buf; } buf = (uint8_t *)malloc(pkg_size); if (!buf) { close(fd); - printf("%s: Failed to malloc memory\n", __func__); + fprintf(stderr, "%s: Failed to malloc memory\n", __func__); return buf; } ret = read(fd, buf, pkg_size); if (ret < 0) { close(fd); - printf("%s: File read operation failed\n", __func__); + fprintf(stderr, "%s: File read operation failed\n", __func__); close_file(buf); return NULL; } @@ -4688,13 +6267,13 @@ save_file(const char *file_path, uint8_t *buf, uint32_t size) FILE *fh = fopen(file_path, "wb"); if (fh == NULL) { - printf("%s: Failed to open %s\n", __func__, file_path); + fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); return -1; } if (fwrite(buf, 1, size, fh) != size) { fclose(fh); - printf("%s: File write operation failed\n", __func__); + fprintf(stderr, "%s: File write operation failed\n", __func__); return -1; } @@ -4717,7 +6296,7 @@ close_file(uint8_t *buf) void port_queue_region_info_display(portid_t port_id, void *buf) { -#ifdef RTE_LIBRTE_I40E_PMD +#ifdef RTE_NET_I40E uint16_t i, j; struct rte_pmd_i40e_queue_regions *info = (struct rte_pmd_i40e_queue_regions *)buf; @@ -4762,19 +6341,20 @@ show_macs(portid_t port_id) { char buf[RTE_ETHER_ADDR_FMT_SIZE]; struct rte_eth_dev_info dev_info; - struct rte_ether_addr *addr; - uint32_t i, num_macs = 0; - struct rte_eth_dev *dev; + int32_t i, rc, num_macs = 0; - dev = &rte_eth_devices[port_id]; + if (eth_dev_info_get_print_err(port_id, &dev_info)) + return; - rte_eth_dev_info_get(port_id, &dev_info); + struct rte_ether_addr addr[dev_info.max_mac_addrs]; + rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); + if (rc < 0) + return; - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = &dev->data->mac_addrs[i]; + for (i = 0; i < rc; i++) { /* skip zero address */ - if (rte_is_zero_ether_addr(addr)) + if (rte_is_zero_ether_addr(&addr[i])) continue; num_macs++; @@ -4782,14 +6362,13 @@ show_macs(portid_t port_id) printf("Number of MAC address added: %d\n", num_macs); 
-	for (i = 0; i < dev_info.max_mac_addrs; i++) {
-		addr = &dev->data->mac_addrs[i];
+	for (i = 0; i < rc; i++) {
 
 		/* skip zero address */
-		if (rte_is_zero_ether_addr(addr))
+		if (rte_is_zero_ether_addr(&addr[i]))
 			continue;
 
-		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
+		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
 		printf(" %s\n", buf);
 	}
 }
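
Editor's sketch (not part of the patch): the reworked set_qmap() hunk above drops testpmd's private rx/tx queue-stats mapping tables and calls the generic ethdev per-queue stats-mapping API instead. The helper below, map_queue_to_counter(), is a hypothetical name used only for illustration; it assumes a configured port and a driver that actually supports per-queue stats mapping.

/* Map one Rx or Tx queue to a basic-stats counter slot via the ethdev API.
 * Illustrative helper only; port/queue/counter values must be valid for the
 * device in use. */
#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

static int
map_queue_to_counter(uint16_t port_id, uint16_t queue_id,
		     uint8_t counter_id, int is_rx)
{
	int ret;

	/* Counter slots are limited to RTE_ETHDEV_QUEUE_STAT_CNTRS. */
	if (counter_id >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
		return -EINVAL;

	if (is_rx)
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
				queue_id, counter_id);
	else
		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
				queue_id, counter_id);

	if (ret != 0)
		fprintf(stderr, "queue stats mapping failed: %d\n", ret);
	return ret;
}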
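
Editor's sketch (not part of the patch): the show_macs() hunk at the end switches from dereferencing rte_eth_devices[] to the public rte_eth_macaddrs_get() accessor (assumed available as in DPDK 21.11 and later). A condensed version of that pattern, with print_port_macs() as a purely illustrative name:

/* Enumerate a port's configured unicast MAC addresses through public APIs
 * only, skipping unused (all-zero) slots. */
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

static void
print_port_macs(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int i, rc;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	/* One slot per possible MAC address on this port. */
	struct rte_ether_addr addrs[dev_info.max_mac_addrs];

	rc = rte_eth_macaddrs_get(port_id, addrs, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		if (rte_is_zero_ether_addr(&addrs[i]))
			continue;	/* unused slot */
		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addrs[i]);
		printf("%s\n", buf);
	}
}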
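
Editor's sketch (not part of the patch): the rx_vlan_*_set() helpers all follow the same read-modify-write pattern on the port's VLAN offload mask, now spelled with the RTE_ETH_*-prefixed macros (21.11 naming, assumed here). rx_vlan_strip_toggle() below is a condensed, hypothetical example; unlike the testpmd code it does not mirror the change into ports[].dev_conf.rxmode.offloads.

/* Read the current VLAN offload mask, flip one bit, write it back. */
#include <stdio.h>
#include <rte_ethdev.h>

static void
rx_vlan_strip_toggle(uint16_t port_id, int on)
{
	int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	int diag;

	if (vlan_offload < 0)
		return;	/* invalid port or query failure */

	if (on)
		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	else
		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		fprintf(stderr, "set_vlan_offload(port=%u) failed: %d\n",
			port_id, diag);
}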
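
Editor's sketch (not part of the patch): set_vf_rate_limit() keeps its -ENOTSUP fallback chain; only the build-time guards change from RTE_LIBRTE_*_PMD to RTE_NET_*. The standalone sketch below shows the same cascade; vf_rate_limit() is an illustrative name and the q_msk semantics are those of the PMD-specific helpers.

/* Try each enabled PMD-specific rate-limit helper until one accepts. */
#include <errno.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif

static int
vf_rate_limit(uint16_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	/* Start from "unsupported" and let each built-in driver helper try. */
	int diag = -ENOTSUP;

#ifdef RTE_NET_IXGBE
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_NET_BNXT
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate,
						      q_msk);
#endif
	/* Silence unused-parameter warnings when neither driver is built. */
	RTE_SET_USED(port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

	return diag;
}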