X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Fconfig.c;h=2c2ab449b55f88d0c5b12c1d218ca8f18d38c1cb;hb=1f8cc1a388610c348da7e379dbff62f1a28690d1;hp=69c5633e854b3358308deae41f4834ea057e579a;hpb=d3bd79713be1399c51163349b74275862ff4663f;p=dpdk.git diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 69c5633e85..2c2ab449b5 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -38,22 +37,33 @@ #include #include #include +#include #include -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE #include #endif -#ifdef RTE_LIBRTE_I40E_PMD +#ifdef RTE_NET_I40E #include #endif -#ifdef RTE_LIBRTE_BNXT_PMD +#ifdef RTE_NET_BNXT #include #endif #include +#include #include "testpmd.h" +#include "cmdline_mtr.h" #define ETHDEV_FWVERS_LEN 32 +#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ +#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW +#else +#define CLOCK_TYPE_ID CLOCK_MONOTONIC +#endif + +#define NS_PER_SEC 1E9 + static char *flowtype_to_str(uint16_t flow_type); static const struct { @@ -75,44 +85,87 @@ static const struct { }; const struct rss_type_info rss_type_table[] = { - { "all", ETH_RSS_IP | ETH_RSS_TCP | - ETH_RSS_UDP | ETH_RSS_SCTP | - ETH_RSS_L2_PAYLOAD }, + { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | + RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD | + RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | + RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS}, { "none", 0 }, - { "ipv4", ETH_RSS_IPV4 }, - { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, - { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, - { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, - { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, - { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, - { "ipv6", ETH_RSS_IPV6 }, - { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, - { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP }, - { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, - { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, - { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, - { "l2-payload", ETH_RSS_L2_PAYLOAD }, - { "ipv6-ex", ETH_RSS_IPV6_EX }, - { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, - { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, - { "port", ETH_RSS_PORT }, - { "vxlan", ETH_RSS_VXLAN }, - { "geneve", ETH_RSS_GENEVE }, - { "nvgre", ETH_RSS_NVGRE }, - { "ip", ETH_RSS_IP }, - { "udp", ETH_RSS_UDP }, - { "tcp", ETH_RSS_TCP }, - { "sctp", ETH_RSS_SCTP }, - { "tunnel", ETH_RSS_TUNNEL }, - { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, - { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, - { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, - { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, - { "esp", ETH_RSS_ESP }, - { "ah", ETH_RSS_AH }, + { "eth", RTE_ETH_RSS_ETH }, + { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY }, + { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY }, + { "vlan", RTE_ETH_RSS_VLAN }, + { "s-vlan", RTE_ETH_RSS_S_VLAN }, + { "c-vlan", RTE_ETH_RSS_C_VLAN }, + { "ipv4", RTE_ETH_RSS_IPV4 }, + { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 }, + { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP }, + { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP }, + { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP }, + { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER }, + { "ipv6", RTE_ETH_RSS_IPV6 }, + { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 }, + { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP }, + { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP }, + { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP }, + { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER }, + { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD }, + { 
"ipv6-ex", RTE_ETH_RSS_IPV6_EX }, + { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX }, + { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX }, + { "port", RTE_ETH_RSS_PORT }, + { "vxlan", RTE_ETH_RSS_VXLAN }, + { "geneve", RTE_ETH_RSS_GENEVE }, + { "nvgre", RTE_ETH_RSS_NVGRE }, + { "ip", RTE_ETH_RSS_IP }, + { "udp", RTE_ETH_RSS_UDP }, + { "tcp", RTE_ETH_RSS_TCP }, + { "sctp", RTE_ETH_RSS_SCTP }, + { "tunnel", RTE_ETH_RSS_TUNNEL }, + { "l3-pre32", RTE_ETH_RSS_L3_PRE32 }, + { "l3-pre40", RTE_ETH_RSS_L3_PRE40 }, + { "l3-pre48", RTE_ETH_RSS_L3_PRE48 }, + { "l3-pre56", RTE_ETH_RSS_L3_PRE56 }, + { "l3-pre64", RTE_ETH_RSS_L3_PRE64 }, + { "l3-pre96", RTE_ETH_RSS_L3_PRE96 }, + { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY }, + { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY }, + { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY }, + { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY }, + { "esp", RTE_ETH_RSS_ESP }, + { "ah", RTE_ETH_RSS_AH }, + { "l2tpv3", RTE_ETH_RSS_L2TPV3 }, + { "pfcp", RTE_ETH_RSS_PFCP }, + { "pppoe", RTE_ETH_RSS_PPPOE }, + { "gtpu", RTE_ETH_RSS_GTPU }, + { "ecpri", RTE_ETH_RSS_ECPRI }, + { "mpls", RTE_ETH_RSS_MPLS }, + { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM }, + { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM }, { NULL, 0 }, }; +static const struct { + enum rte_eth_fec_mode mode; + const char *name; +} fec_mode_name[] = { + { + .mode = RTE_ETH_FEC_NOFEC, + .name = "off", + }, + { + .mode = RTE_ETH_FEC_AUTO, + .name = "auto", + }, + { + .mode = RTE_ETH_FEC_BASER, + .name = "baser", + }, + { + .mode = RTE_ETH_FEC_RS, + .name = "rs", + }, +}; + static void print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) { @@ -121,6 +174,65 @@ print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) printf("%s%s", name, buf); } +static void +nic_xstats_display_periodic(portid_t port_id) +{ + struct xstat_display_info *xstats_info; + uint64_t *prev_values, *curr_values; + uint64_t diff_value, value_rate; + struct timespec cur_time; + uint64_t *ids_supp; + size_t ids_supp_sz; + uint64_t diff_ns; + unsigned int i; + int rc; + + xstats_info = &ports[port_id].xstats_info; + + ids_supp_sz = xstats_info->ids_supp_sz; + if (ids_supp_sz == 0) + return; + + printf("\n"); + + ids_supp = xstats_info->ids_supp; + prev_values = xstats_info->prev_values; + curr_values = xstats_info->curr_values; + + rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values, + ids_supp_sz); + if (rc != (int)ids_supp_sz) { + fprintf(stderr, + "Failed to get values of %zu xstats for port %u - return code %d\n", + ids_supp_sz, port_id, rc); + return; + } + + diff_ns = 0; + if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { + uint64_t ns; + + ns = cur_time.tv_sec * NS_PER_SEC; + ns += cur_time.tv_nsec; + + if (xstats_info->prev_ns != 0) + diff_ns = ns - xstats_info->prev_ns; + xstats_info->prev_ns = ns; + } + + printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)"); + for (i = 0; i < ids_supp_sz; i++) { + diff_value = (curr_values[i] > prev_values[i]) ? + (curr_values[i] - prev_values[i]) : 0; + prev_values[i] = curr_values[i]; + value_rate = diff_ns > 0 ? 
+ (double)diff_value / diff_ns * NS_PER_SEC : 0; + + printf(" %-25s%12"PRIu64" %15"PRIu64"\n", + xstats_display[i].name, curr_values[i], value_rate); + } +} + void nic_stats_display(portid_t port_id) { @@ -128,13 +240,12 @@ nic_stats_display(portid_t port_id) static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; - static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; + static uint64_t prev_ns[RTE_MAX_ETHPORTS]; + struct timespec cur_time; uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, - diff_cycles; + diff_ns; uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; struct rte_eth_stats stats; - struct rte_port *port = &ports[port_id]; - uint8_t i; static const char *nic_stats_border = "########################"; @@ -146,51 +257,24 @@ nic_stats_display(portid_t port_id) printf("\n %s NIC statistics for port %-2d %s\n", nic_stats_border, port_id, nic_stats_border); - if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { - printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " - "%-"PRIu64"\n", - stats.ipackets, stats.imissed, stats.ibytes); - printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); - printf(" RX-nombuf: %-10"PRIu64"\n", - stats.rx_nombuf); - printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " - "%-"PRIu64"\n", - stats.opackets, stats.oerrors, stats.obytes); - } - else { - printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 - " RX-bytes: %10"PRIu64"\n", - stats.ipackets, stats.ierrors, stats.ibytes); - printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); - printf(" RX-nombuf: %10"PRIu64"\n", - stats.rx_nombuf); - printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 - " TX-bytes: %10"PRIu64"\n", - stats.opackets, stats.oerrors, stats.obytes); - } - - if (port->rx_queue_stats_mapping_enabled) { - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d RX-packets: %10"PRIu64 - " RX-errors: %10"PRIu64 - " RX-bytes: %10"PRIu64"\n", - i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); - } - } - if (port->tx_queue_stats_mapping_enabled) { - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d TX-packets: %10"PRIu64 - " TX-bytes: %10"PRIu64"\n", - i, stats.q_opackets[i], stats.q_obytes[i]); - } - } + printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " + "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); + printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); + printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf); + printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " + "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); + + diff_ns = 0; + if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) { + uint64_t ns; - diff_cycles = prev_cycles[port_id]; - prev_cycles[port_id] = rte_rdtsc(); - if (diff_cycles > 0) - diff_cycles = prev_cycles[port_id] - diff_cycles; + ns = cur_time.tv_sec * NS_PER_SEC; + ns += cur_time.tv_nsec; + + if (prev_ns[port_id] != 0) + diff_ns = ns - prev_ns[port_id]; + prev_ns[port_id] = ns; + } diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? (stats.ipackets - prev_pkts_rx[port_id]) : 0; @@ -198,10 +282,10 @@ nic_stats_display(portid_t port_id) (stats.opackets - prev_pkts_tx[port_id]) : 0; prev_pkts_rx[port_id] = stats.ipackets; prev_pkts_tx[port_id] = stats.opackets; - mpps_rx = diff_cycles > 0 ? 
- diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; - mpps_tx = diff_cycles > 0 ? - diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; + mpps_rx = diff_ns > 0 ? + (double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0; + mpps_tx = diff_ns > 0 ? + (double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0; diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? (stats.ibytes - prev_bytes_rx[port_id]) : 0; @@ -209,16 +293,19 @@ nic_stats_display(portid_t port_id) (stats.obytes - prev_bytes_tx[port_id]) : 0; prev_bytes_rx[port_id] = stats.ibytes; prev_bytes_tx[port_id] = stats.obytes; - mbps_rx = diff_cycles > 0 ? - diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; - mbps_tx = diff_cycles > 0 ? - diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; + mbps_rx = diff_ns > 0 ? + (double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0; + mbps_tx = diff_ns > 0 ? + (double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0; printf("\n Throughput (since last show)\n"); printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, mpps_tx, mbps_tx * 8); + if (xstats_display_num > 0) + nic_xstats_display_periodic(port_id); + printf(" %s############################%s\n", nic_stats_border, nic_stats_border); } @@ -226,11 +313,30 @@ nic_stats_display(portid_t port_id) void nic_stats_clear(portid_t port_id) { + int ret; + if (port_id_is_invalid(port_id, ENABLED_WARN)) { print_valid_ports(); return; } - rte_eth_stats_reset(port_id); + + ret = rte_eth_stats_reset(port_id); + if (ret != 0) { + fprintf(stderr, + "%s: Error: failed to reset stats (port %u): %s", + __func__, port_id, strerror(-ret)); + return; + } + + ret = rte_eth_stats_get(port_id, &ports[port_id].stats); + if (ret != 0) { + if (ret < 0) + ret = -ret; + fprintf(stderr, + "%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); + return; + } printf("\n NIC statistics for port %d cleared\n", port_id); } @@ -247,26 +353,26 @@ nic_xstats_display(portid_t port_id) } printf("###### NIC extended statistics for port %-2d\n", port_id); if (!rte_eth_dev_is_valid_port(port_id)) { - printf("Error: Invalid port number %i\n", port_id); + fprintf(stderr, "Error: Invalid port number %i\n", port_id); return; } /* Get count */ cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); if (cnt_xstats < 0) { - printf("Error: Cannot get count of xstats\n"); + fprintf(stderr, "Error: Cannot get count of xstats\n"); return; } /* Get id-name lookup table */ xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); if (xstats_names == NULL) { - printf("Cannot allocate memory for xstats lookup\n"); + fprintf(stderr, "Cannot allocate memory for xstats lookup\n"); return; } if (cnt_xstats != rte_eth_xstats_get_names( port_id, xstats_names, cnt_xstats)) { - printf("Error: Cannot get xstats lookup\n"); + fprintf(stderr, "Error: Cannot get xstats lookup\n"); free(xstats_names); return; } @@ -274,12 +380,12 @@ nic_xstats_display(portid_t port_id) /* Get stats themselves */ xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); if (xstats == NULL) { - printf("Cannot allocate memory for xstats\n"); + fprintf(stderr, "Cannot allocate memory for xstats\n"); free(xstats_names); return; } if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { - printf("Error: Unable to get xstats\n"); + fprintf(stderr, "Error: Unable to get xstats\n"); free(xstats_names); free(xstats); return; @@ -306,59 +412,36 @@ nic_xstats_clear(portid_t port_id) print_valid_ports(); return; } + ret = 
rte_eth_xstats_reset(port_id); if (ret != 0) { - printf("%s: Error: failed to reset xstats (port %u): %s", - __func__, port_id, strerror(ret)); - } -} - -void -nic_stats_mapping_display(portid_t port_id) -{ - struct rte_port *port = &ports[port_id]; - uint16_t i; - - static const char *nic_stats_mapping_border = "########################"; - - if (port_id_is_invalid(port_id, ENABLED_WARN)) { - print_valid_ports(); + fprintf(stderr, + "%s: Error: failed to reset xstats (port %u): %s\n", + __func__, port_id, strerror(-ret)); return; } - if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { - printf("Port id %d - either does not support queue statistic mapping or" - " no queue statistic mapping set\n", port_id); + ret = rte_eth_stats_get(port_id, &ports[port_id].stats); + if (ret != 0) { + if (ret < 0) + ret = -ret; + fprintf(stderr, "%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); return; } +} - printf("\n %s NIC statistics mapping for port %-2d %s\n", - nic_stats_mapping_border, port_id, nic_stats_mapping_border); - - if (port->rx_queue_stats_mapping_enabled) { - for (i = 0; i < nb_rx_queue_stats_mappings; i++) { - if (rx_queue_stats_mappings[i].port_id == port_id) { - printf(" RX-queue %2d mapped to Stats Reg %2d\n", - rx_queue_stats_mappings[i].queue_id, - rx_queue_stats_mappings[i].stats_counter_id); - } - } - printf("\n"); - } - - - if (port->tx_queue_stats_mapping_enabled) { - for (i = 0; i < nb_tx_queue_stats_mappings; i++) { - if (tx_queue_stats_mappings[i].port_id == port_id) { - printf(" TX-queue %2d mapped to Stats Reg %2d\n", - tx_queue_stats_mappings[i].queue_id, - tx_queue_stats_mappings[i].stats_counter_id); - } - } - } - - printf(" %s####################################%s\n", - nic_stats_mapping_border, nic_stats_mapping_border); +static const char * +get_queue_state_name(uint8_t queue_state) +{ + if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED) + return "stopped"; + else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED) + return "started"; + else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN) + return "hairpin"; + else + return "unknown"; } void @@ -371,8 +454,8 @@ rx_queue_infos_display(portid_t port_id, uint16_t queue_id) rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); if (rc != 0) { - printf("Failed to retrieve information for port: %u, " - "RX queue: %hu\nerror desc: %s(%d)\n", + fprintf(stderr, + "Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n", port_id, queue_id, strerror(-rc), rc); return; } @@ -391,6 +474,9 @@ rx_queue_infos_display(portid_t port_id, uint16_t queue_id) (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); printf("\nRX scattered packets: %s", (qinfo.scattered_rx != 0) ? 
"on" : "off"); + printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state)); + if (qinfo.rx_buf_size != 0) + printf("\nRX buffer size: %hu", qinfo.rx_buf_size); printf("\nNumber of RXDs: %hu", qinfo.nb_desc); if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) @@ -412,8 +498,8 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id) rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); if (rc != 0) { - printf("Failed to retrieve information for port: %u, " - "TX queue: %hu\nerror desc: %s(%d)\n", + fprintf(stderr, + "Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n", port_id, queue_id, strerror(-rc), rc); return; } @@ -429,6 +515,7 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id) printf("\nTX deferred start: %s", (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); printf("\nNumber of TXDs: %hu", qinfo.nb_desc); + printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state)); if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) printf("\nBurst mode: %s%s", @@ -446,6 +533,46 @@ static int bus_match_all(const struct rte_bus *bus, const void *data) return 0; } +static void +device_infos_display_speeds(uint32_t speed_capa) +{ + printf("\n\tDevice speed capability:"); + if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG) + printf(" Autonegotiate (all speeds)"); + if (speed_capa & RTE_ETH_LINK_SPEED_FIXED) + printf(" Disable autonegotiate (fixed speed) "); + if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD) + printf(" 10 Mbps half-duplex "); + if (speed_capa & RTE_ETH_LINK_SPEED_10M) + printf(" 10 Mbps full-duplex "); + if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD) + printf(" 100 Mbps half-duplex "); + if (speed_capa & RTE_ETH_LINK_SPEED_100M) + printf(" 100 Mbps full-duplex "); + if (speed_capa & RTE_ETH_LINK_SPEED_1G) + printf(" 1 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_2_5G) + printf(" 2.5 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_5G) + printf(" 5 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_10G) + printf(" 10 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_20G) + printf(" 20 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_25G) + printf(" 25 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_40G) + printf(" 40 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_50G) + printf(" 50 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_56G) + printf(" 56 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_100G) + printf(" 100 Gbps "); + if (speed_capa & RTE_ETH_LINK_SPEED_200G) + printf(" 200 Gbps "); +} + void device_infos_display(const char *identifier) { @@ -457,6 +584,7 @@ device_infos_display(const char *identifier) struct rte_device *dev; struct rte_devargs da; portid_t port_id; + struct rte_eth_dev_info dev_info; char devstr[128]; memset(&da, 0, sizeof(da)); @@ -464,9 +592,7 @@ device_infos_display(const char *identifier) goto skip_parse; if (rte_devargs_parsef(&da, "%s", identifier)) { - printf("cannot parse identifier\n"); - if (da.args) - free(da.args); + fprintf(stderr, "cannot parse identifier\n"); return; } @@ -508,10 +634,36 @@ skip_parse: &mac_addr); rte_eth_dev_get_name_by_port(port_id, name); printf("\n\tDevice name: %s", name); + if (rte_eth_dev_info_get(port_id, &dev_info) == 0) + device_infos_display_speeds(dev_info.speed_capa); printf("\n"); } } }; + rte_devargs_reset(&da); +} + +static void +print_dev_capabilities(uint64_t capabilities) +{ + uint64_t single_capa; + int begin; + int end; + int bit; + + if (capabilities == 0) + return; + + begin = __builtin_ctzll(capabilities); + end = 
sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities); + + single_capa = 1ULL << begin; + for (bit = begin; bit < end; bit++) { + if (capabilities & single_capa) + printf(" %s", + rte_eth_dev_capability_name(single_capa)); + single_capa <<= 1; + } } void @@ -561,7 +713,7 @@ port_infos_display(portid_t port_id) printf("\nConnect to socket: %u", port->socket_id); if (port_numa[port_id] != NUMA_NO_CONFIG) { - mp = mbuf_pool_find(port_numa[port_id]); + mp = mbuf_pool_find(port_numa[port_id], 0); if (mp) printf("\nmemory allocation on the socket: %d", port_numa[port_id]); @@ -569,9 +721,11 @@ port_infos_display(portid_t port_id) printf("\nmemory allocation on the socket: %u",port->socket_id); printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); - printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); - printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed)); + printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex")); + printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ? + ("On") : ("Off")); if (!rte_eth_dev_get_mtu(port_id, &mtu)) printf("MTU: %u\n", mtu); @@ -588,22 +742,22 @@ port_infos_display(portid_t port_id) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (vlan_offload >= 0){ printf("VLAN offload: \n"); - if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) + if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD) printf(" strip on, "); else printf(" strip off, "); - if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) + if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD) printf("filter on, "); else printf("filter off, "); - if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) + if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD) printf("extend on, "); else printf("extend off, "); - if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) + if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD) printf("qinq strip on\n"); else printf("qinq strip off\n"); @@ -663,6 +817,9 @@ port_infos_display(portid_t port_id) printf("Max segment number per MTU/TSO: %hu\n", dev_info.tx_desc_lim.nb_mtu_seg_max); + printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa); + print_dev_capabilities(dev_info.dev_capa); + printf(" )\n"); /* Show switch info only if valid switch domain and port id is set */ if (dev_info.switch_info.domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { @@ -673,6 +830,9 @@ port_infos_display(portid_t port_id) dev_info.switch_info.domain_id); printf("Switch Port Id: %u\n", dev_info.switch_info.port_id); + if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0) + printf("Switch Rx domain: %u\n", + dev_info.switch_info.rx_domain); } } @@ -714,283 +874,129 @@ port_summary_display(portid_t port_id) if (ret != 0) return; - printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", - port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], - mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], - mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, + printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n", + port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name, dev_info.driver_name, (link.link_status) ? 
("up") : ("down"), - (unsigned int) link.link_speed); + rte_eth_link_speed_to_str(link.link_speed)); } void -port_offload_cap_display(portid_t port_id) +port_eeprom_display(portid_t port_id) { - struct rte_eth_dev_info dev_info; - static const char *info_border = "************"; + struct rte_dev_eeprom_info einfo; int ret; - - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; - - ret = eth_dev_info_get_print_err(port_id, &dev_info); - if (ret != 0) + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); return; - - printf("\n%s Port %d supported offload features: %s\n", - info_border, port_id, info_border); - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { - printf("VLAN stripped: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_STRIP) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { - printf("Double VLANs stripped: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_QINQ_STRIP) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { - printf("RX IPv4 checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { - printf("RX UDP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { - printf("RX TCP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_TCP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { - printf("RX SCTP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_SCTP_CKSUM) - printf("on\n"); - else - printf("off\n"); } - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { - printf("RX Outer IPv4 checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { - printf("RX Outer UDP checksum: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { - printf("Large receive offload: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_TCP_LRO) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { - printf("HW timestamp: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_TIMESTAMP) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { - printf("Rx Keep CRC: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_KEEP_CRC) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { - printf("RX offload security: "); - if (ports[port_id].dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_SECURITY) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { - printf("VLAN insert: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_VLAN_INSERT) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & 
DEV_TX_OFFLOAD_QINQ_INSERT) { - printf("Double VLANs insert: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_QINQ_INSERT) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { - printf("TX IPv4 checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { - printf("TX UDP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { - printf("TX TCP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_TCP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { - printf("TX SCTP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_SCTP_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { - printf("TX Outer IPv4 checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) - printf("on\n"); - else - printf("off\n"); - } - - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { - printf("TX TCP segmentation: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_TCP_TSO) - printf("on\n"); - else - printf("off\n"); + int len_eeprom = rte_eth_dev_get_eeprom_length(port_id); + if (len_eeprom < 0) { + switch (len_eeprom) { + case -ENODEV: + fprintf(stderr, "port index %d invalid\n", port_id); + break; + case -ENOTSUP: + fprintf(stderr, "operation not supported by device\n"); + break; + case -EIO: + fprintf(stderr, "device is removed\n"); + break; + default: + fprintf(stderr, "Unable to get EEPROM: %d\n", + len_eeprom); + break; + } + return; } - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { - printf("TX UDP segmentation: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_UDP_TSO) - printf("on\n"); - else - printf("off\n"); - } + char buf[len_eeprom]; + einfo.offset = 0; + einfo.length = len_eeprom; + einfo.data = buf; - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { - printf("TSO for VXLAN tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_VXLAN_TNL_TSO) - printf("on\n"); - else - printf("off\n"); + ret = rte_eth_dev_get_eeprom(port_id, &einfo); + if (ret != 0) { + switch (ret) { + case -ENODEV: + fprintf(stderr, "port index %d invalid\n", port_id); + break; + case -ENOTSUP: + fprintf(stderr, "operation not supported by device\n"); + break; + case -EIO: + fprintf(stderr, "device is removed\n"); + break; + default: + fprintf(stderr, "Unable to get EEPROM: %d\n", ret); + break; + } + return; } + rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); + printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom); +} - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { - printf("TSO for GRE tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_GRE_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } +void +port_module_eeprom_display(portid_t port_id) +{ + struct rte_eth_dev_module_info minfo; + struct rte_dev_eeprom_info einfo; + int ret; - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { - printf("TSO for IPIP tunnel packet: "); - if 
(ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_IPIP_TNL_TSO) - printf("on\n"); - else - printf("off\n"); + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; } - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { - printf("TSO for GENEVE tunnel packet: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_GENEVE_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { - printf("IP tunnel TSO: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_IP_TNL_TSO) - printf("on\n"); - else - printf("off\n"); + ret = rte_eth_dev_get_module_info(port_id, &minfo); + if (ret != 0) { + switch (ret) { + case -ENODEV: + fprintf(stderr, "port index %d invalid\n", port_id); + break; + case -ENOTSUP: + fprintf(stderr, "operation not supported by device\n"); + break; + case -EIO: + fprintf(stderr, "device is removed\n"); + break; + default: + fprintf(stderr, "Unable to get module EEPROM: %d\n", + ret); + break; + } + return; } - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { - printf("UDP tunnel TSO: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_UDP_TNL_TSO) - printf("on\n"); - else - printf("off\n"); - } + char buf[minfo.eeprom_len]; + einfo.offset = 0; + einfo.length = minfo.eeprom_len; + einfo.data = buf; - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { - printf("TX Outer UDP checksum: "); - if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) - printf("on\n"); - else - printf("off\n"); + ret = rte_eth_dev_get_module_eeprom(port_id, &einfo); + if (ret != 0) { + switch (ret) { + case -ENODEV: + fprintf(stderr, "port index %d invalid\n", port_id); + break; + case -ENOTSUP: + fprintf(stderr, "operation not supported by device\n"); + break; + case -EIO: + fprintf(stderr, "device is removed\n"); + break; + default: + fprintf(stderr, "Unable to get module EEPROM: %d\n", + ret); + break; + } + return; } + rte_hexdump(stdout, "hexdump", einfo.data, einfo.length); + printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length); } int @@ -1006,7 +1012,7 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning) return 0; if (warning == ENABLED_WARN) - printf("Invalid port %d\n", port_id); + fprintf(stderr, "Invalid port %d\n", port_id); return 1; } @@ -1027,7 +1033,7 @@ vlan_id_is_invalid(uint16_t vlan_id) { if (vlan_id < 4096) return 0; - printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); + fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id); return 1; } @@ -1039,14 +1045,14 @@ port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) uint64_t pci_len; if (reg_off & 0x3) { - printf("Port register offset 0x%X not aligned on a 4-byte " - "boundary\n", - (unsigned)reg_off); + fprintf(stderr, + "Port register offset 0x%X not aligned on a 4-byte boundary\n", + (unsigned int)reg_off); return 1; } if (!ports[port_id].dev_info.device) { - printf("Invalid device\n"); + fprintf(stderr, "Invalid device\n"); return 0; } @@ -1054,15 +1060,16 @@ port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) if (bus && !strcmp(bus->name, "pci")) { pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); } else { - printf("Not a PCI device\n"); + fprintf(stderr, "Not a PCI device\n"); return 1; } pci_len = pci_dev->mem_resource[0].len; if (reg_off >= pci_len) { - printf("Port %d: register offset %u (0x%X) out of port PCI " - "resource 
(length=%"PRIu64")\n", - port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); + fprintf(stderr, + "Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n", + port_id, (unsigned int)reg_off, (unsigned int)reg_off, + pci_len); return 1; } return 0; @@ -1073,7 +1080,7 @@ reg_bit_pos_is_invalid(uint8_t bit_pos) { if (bit_pos <= 31) return 0; - printf("Invalid bit position %d (must be <= 31)\n", bit_pos); + fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos); return 1; } @@ -1160,7 +1167,8 @@ port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, if (reg_bit_pos_is_invalid(bit_pos)) return; if (bit_v > 1) { - printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); + fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n", + (int) bit_v); return; } reg_v = port_id_pci_reg_read(port_id, reg_off); @@ -1200,7 +1208,7 @@ port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, max_v = 0xFFFFFFFF; if (value > max_v) { - printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", + fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n", (unsigned)value, (unsigned)value, (unsigned)max_v, (unsigned)max_v); return; @@ -1226,68 +1234,169 @@ port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) void port_mtu_set(portid_t port_id, uint16_t mtu) { + struct rte_port *port = &ports[port_id]; int diag; - struct rte_port *rte_port = &ports[port_id]; - struct rte_eth_dev_info dev_info; - uint16_t eth_overhead; - int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; - ret = eth_dev_info_get_print_err(port_id, &dev_info); - if (ret != 0) - return; - - if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { - printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", - mtu, dev_info.min_mtu, dev_info.max_mtu); - return; - } - diag = rte_eth_dev_set_mtu(port_id, mtu); - if (diag == 0 && - dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { - /* - * Ether overhead in driver is equal to the difference of - * max_rx_pktlen and max_mtu in rte_eth_dev_info when the - * device supports jumbo frame. - */ - eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; - if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { - rte_port->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_JUMBO_FRAME; - rte_port->dev_conf.rxmode.max_rx_pkt_len = - mtu + eth_overhead; - } else - rte_port->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_JUMBO_FRAME; - - return; + if (port->need_reconfig == 0) { + diag = rte_eth_dev_set_mtu(port_id, mtu); + if (diag != 0) { + fprintf(stderr, "Set MTU failed. diag=%d\n", diag); + return; + } } - printf("Set MTU failed. diag=%d\n", diag); + + port->dev_conf.rxmode.mtu = mtu; } /* Generic flow management functions. */ -/** Generate a port_flow entry from attributes/pattern/actions. 
*/ -static struct port_flow * -port_flow_new(const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action *actions, - struct rte_flow_error *error) +static struct port_flow_tunnel * +port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id) { - const struct rte_flow_conv_rule rule = { - .attr_ro = attr, - .pattern_ro = pattern, - .actions_ro = actions, - }; - struct port_flow *pf; - int ret; + struct port_flow_tunnel *flow_tunnel; - ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); - if (ret < 0) - return NULL; - pf = calloc(1, offsetof(struct port_flow, rule) + ret); + LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { + if (flow_tunnel->id == port_tunnel_id) + goto out; + } + flow_tunnel = NULL; + +out: + return flow_tunnel; +} + +const char * +port_flow_tunnel_type(struct rte_flow_tunnel *tunnel) +{ + const char *type; + switch (tunnel->type) { + default: + type = "unknown"; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + type = "vxlan"; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + type = "gre"; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + type = "nvgre"; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + type = "geneve"; + break; + } + + return type; +} + +struct port_flow_tunnel * +port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun) +{ + struct rte_port *port = &ports[port_id]; + struct port_flow_tunnel *flow_tunnel; + + LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) { + if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun))) + goto out; + } + flow_tunnel = NULL; + +out: + return flow_tunnel; +} + +void port_flow_tunnel_list(portid_t port_id) +{ + struct rte_port *port = &ports[port_id]; + struct port_flow_tunnel *flt; + + LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { + printf("port %u tunnel #%u type=%s", + port_id, flt->id, port_flow_tunnel_type(&flt->tunnel)); + if (flt->tunnel.tun_id) + printf(" id=%" PRIu64, flt->tunnel.tun_id); + printf("\n"); + } +} + +void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id) +{ + struct rte_port *port = &ports[port_id]; + struct port_flow_tunnel *flt; + + LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { + if (flt->id == tunnel_id) + break; + } + if (flt) { + LIST_REMOVE(flt, chain); + free(flt); + printf("port %u: flow tunnel #%u destroyed\n", + port_id, tunnel_id); + } +} + +void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops) +{ + struct rte_port *port = &ports[port_id]; + enum rte_flow_item_type type; + struct port_flow_tunnel *flt; + + if (!strcmp(ops->type, "vxlan")) + type = RTE_FLOW_ITEM_TYPE_VXLAN; + else if (!strcmp(ops->type, "gre")) + type = RTE_FLOW_ITEM_TYPE_GRE; + else if (!strcmp(ops->type, "nvgre")) + type = RTE_FLOW_ITEM_TYPE_NVGRE; + else if (!strcmp(ops->type, "geneve")) + type = RTE_FLOW_ITEM_TYPE_GENEVE; + else { + fprintf(stderr, "cannot offload \"%s\" tunnel type\n", + ops->type); + return; + } + LIST_FOREACH(flt, &port->flow_tunnel_list, chain) { + if (flt->tunnel.type == type) + break; + } + if (!flt) { + flt = calloc(1, sizeof(*flt)); + if (!flt) { + fprintf(stderr, "failed to allocate port flt object\n"); + return; + } + flt->tunnel.type = type; + flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 : + LIST_FIRST(&port->flow_tunnel_list)->id + 1; + LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain); + } + printf("port %d: flow tunnel #%u type %s\n", + port_id, flt->id, ops->type); +} + +/** Generate a port_flow entry from attributes/pattern/actions. 
*/ +static struct port_flow * +port_flow_new(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + const struct rte_flow_conv_rule rule = { + .attr_ro = attr, + .pattern_ro = pattern, + .actions_ro = actions, + }; + struct port_flow *pf; + int ret; + + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); + if (ret < 0) + return NULL; + pf = calloc(1, offsetof(struct port_flow, rule) + ret); if (!pf) { rte_flow_error_set (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -1328,18 +1437,568 @@ port_flow_complain(struct rte_flow_error *error) char buf[32]; int err = rte_errno; - if ((unsigned int)error->type >= RTE_DIM(errstrlist) || - !errstrlist[error->type]) - errstr = "unknown type"; - else - errstr = errstrlist[error->type]; - printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, - error->type, errstr, - error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", - error->cause), buf) : "", - error->message ? error->message : "(no stated reason)", - rte_strerror(err)); - return -err; + if ((unsigned int)error->type >= RTE_DIM(errstrlist) || + !errstrlist[error->type]) + errstr = "unknown type"; + else + errstr = errstrlist[error->type]; + fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n", + __func__, error->type, errstr, + error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", + error->cause), buf) : "", + error->message ? error->message : "(no stated reason)", + rte_strerror(err)); + return -err; +} + +static void +rss_config_display(struct rte_flow_action_rss *rss_conf) +{ + uint8_t i; + + if (rss_conf == NULL) { + fprintf(stderr, "Invalid rule\n"); + return; + } + + printf("RSS:\n" + " queues:"); + if (rss_conf->queue_num == 0) + printf(" none"); + for (i = 0; i < rss_conf->queue_num; i++) + printf(" %d", rss_conf->queue[i]); + printf("\n"); + + printf(" function: "); + switch (rss_conf->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + printf("default\n"); + break; + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + printf("toeplitz\n"); + break; + case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: + printf("simple_xor\n"); + break; + case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: + printf("symmetric_toeplitz\n"); + break; + default: + printf("Unknown function\n"); + return; + } + + printf(" types:\n"); + if (rss_conf->types == 0) { + printf(" none\n"); + return; + } + for (i = 0; rss_type_table[i].str; i++) { + if ((rss_conf->types & + rss_type_table[i].rss_type) == + rss_type_table[i].rss_type && + rss_type_table[i].rss_type != 0) + printf(" %s\n", rss_type_table[i].str); + } +} + +static struct port_indirect_action * +action_get_by_id(portid_t port_id, uint32_t id) +{ + struct rte_port *port; + struct port_indirect_action **ppia; + struct port_indirect_action *pia = NULL; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return NULL; + port = &ports[port_id]; + ppia = &port->actions_list; + while (*ppia) { + if ((*ppia)->id == id) { + pia = *ppia; + break; + } + ppia = &(*ppia)->next; + } + if (!pia) + fprintf(stderr, + "Failed to find indirect action #%u on port %u\n", + id, port_id); + return pia; +} + +static int +action_alloc(portid_t port_id, uint32_t id, + struct port_indirect_action **action) +{ + struct rte_port *port; + struct port_indirect_action **ppia; + struct port_indirect_action *pia = NULL; + + *action = NULL; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + 
return -EINVAL; + port = &ports[port_id]; + if (id == UINT32_MAX) { + /* taking first available ID */ + if (port->actions_list) { + if (port->actions_list->id == UINT32_MAX - 1) { + fprintf(stderr, + "Highest indirect action ID is already assigned, delete it first\n"); + return -ENOMEM; + } + id = port->actions_list->id + 1; + } else { + id = 0; + } + } + pia = calloc(1, sizeof(*pia)); + if (!pia) { + fprintf(stderr, + "Allocation of port %u indirect action failed\n", + port_id); + return -ENOMEM; + } + ppia = &port->actions_list; + while (*ppia && (*ppia)->id > id) + ppia = &(*ppia)->next; + if (*ppia && (*ppia)->id == id) { + fprintf(stderr, + "Indirect action #%u is already assigned, delete it first\n", + id); + free(pia); + return -EINVAL; + } + pia->next = *ppia; + pia->id = id; + *ppia = pia; + *action = pia; + return 0; +} + +/** Create indirect action */ +int +port_action_handle_create(portid_t port_id, uint32_t id, + const struct rte_flow_indir_action_conf *conf, + const struct rte_flow_action *action) +{ + struct port_indirect_action *pia; + int ret; + struct rte_flow_error error; + struct rte_port *port; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + ret = action_alloc(port_id, id, &pia); + if (ret) + return ret; + + port = &ports[port_id]; + + if (conf->transfer) + port_id = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + if (action->type == RTE_FLOW_ACTION_TYPE_AGE) { + struct rte_flow_action_age *age = + (struct rte_flow_action_age *)(uintptr_t)(action->conf); + + pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION; + age->context = &pia->age_type; + } else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) { + struct rte_flow_action_conntrack *ct = + (struct rte_flow_action_conntrack *)(uintptr_t)(action->conf); + + memcpy(ct, &conntrack_context, sizeof(*ct)); + } + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x22, sizeof(error)); + pia->handle = rte_flow_action_handle_create(port_id, conf, action, + &error); + if (!pia->handle) { + uint32_t destroy_id = pia->id; + port_action_handle_destroy(port_id, 1, &destroy_id); + return port_flow_complain(&error); + } + pia->type = action->type; + pia->transfer = conf->transfer; + printf("Indirect action #%u created\n", pia->id); + return 0; +} + +/** Destroy indirect action */ +int +port_action_handle_destroy(portid_t port_id, + uint32_t n, + const uint32_t *actions) +{ + struct rte_port *port; + struct port_indirect_action **tmp; + uint32_t c = 0; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + tmp = &port->actions_list; + while (*tmp) { + uint32_t i; + + for (i = 0; i != n; ++i) { + struct rte_flow_error error; + struct port_indirect_action *pia = *tmp; + portid_t port_id_eff = port_id; + + if (actions[i] != pia->id) + continue; + + if (pia->transfer) + port_id_eff = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || + port_id_eff == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + /* + * Poisoning to make sure PMDs update it in case + * of error. 
+ */ + memset(&error, 0x33, sizeof(error)); + + if (pia->handle && rte_flow_action_handle_destroy( + port_id_eff, pia->handle, &error)) { + ret = port_flow_complain(&error); + continue; + } + *tmp = pia->next; + printf("Indirect action #%u destroyed\n", pia->id); + free(pia); + break; + } + if (i == n) + tmp = &(*tmp)->next; + ++c; + } + return ret; +} + + +/** Get indirect action by port + id */ +struct rte_flow_action_handle * +port_action_handle_get_by_id(portid_t port_id, uint32_t id) +{ + + struct port_indirect_action *pia = action_get_by_id(port_id, id); + + return (pia) ? pia->handle : NULL; +} + +/** Update indirect action */ +int +port_action_handle_update(portid_t port_id, uint32_t id, + const struct rte_flow_action *action) +{ + struct rte_flow_error error; + struct rte_flow_action_handle *action_handle; + struct port_indirect_action *pia; + struct rte_port *port; + const void *update; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + port = &ports[port_id]; + + action_handle = port_action_handle_get_by_id(port_id, id); + if (!action_handle) + return -EINVAL; + pia = action_get_by_id(port_id, id); + if (!pia) + return -EINVAL; + switch (pia->type) { + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + update = action->conf; + break; + default: + update = action; + break; + } + + if (pia->transfer) + port_id = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + if (rte_flow_action_handle_update(port_id, action_handle, update, + &error)) { + return port_flow_complain(&error); + } + printf("Indirect action #%u updated\n", id); + return 0; +} + +int +port_action_handle_query(portid_t port_id, uint32_t id) +{ + struct rte_flow_error error; + struct port_indirect_action *pia; + union { + struct rte_flow_query_count count; + struct rte_flow_query_age age; + struct rte_flow_action_conntrack ct; + } query; + portid_t port_id_eff = port_id; + struct rte_port *port; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + port = &ports[port_id]; + + pia = action_get_by_id(port_id, id); + if (!pia) + return -EINVAL; + switch (pia->type) { + case RTE_FLOW_ACTION_TYPE_AGE: + case RTE_FLOW_ACTION_TYPE_COUNT: + break; + default: + fprintf(stderr, + "Indirect action %u (type: %d) on port %u doesn't support query\n", + id, pia->type, port_id); + return -ENOTSUP; + } + + if (pia->transfer) + port_id_eff = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || + port_id_eff == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + /* Poisoning to make sure PMDs update it in case of error. 
*/ + memset(&error, 0x55, sizeof(error)); + memset(&query, 0, sizeof(query)); + if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query, + &error)) + return port_flow_complain(&error); + switch (pia->type) { + case RTE_FLOW_ACTION_TYPE_AGE: + printf("Indirect AGE action:\n" + " aged: %u\n" + " sec_since_last_hit_valid: %u\n" + " sec_since_last_hit: %" PRIu32 "\n", + query.age.aged, + query.age.sec_since_last_hit_valid, + query.age.sec_since_last_hit); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + printf("Indirect COUNT action:\n" + " hits_set: %u\n" + " bytes_set: %u\n" + " hits: %" PRIu64 "\n" + " bytes: %" PRIu64 "\n", + query.count.hits_set, + query.count.bytes_set, + query.count.hits, + query.count.bytes); + break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + printf("Conntrack Context:\n" + " Peer: %u, Flow dir: %s, Enable: %u\n" + " Live: %u, SACK: %u, CACK: %u\n" + " Packet dir: %s, Liberal: %u, State: %u\n" + " Factor: %u, Retrans: %u, TCP flags: %u\n" + " Last Seq: %u, Last ACK: %u\n" + " Last Win: %u, Last End: %u\n", + query.ct.peer_port, + query.ct.is_original_dir ? "Original" : "Reply", + query.ct.enable, query.ct.live_connection, + query.ct.selective_ack, query.ct.challenge_ack_passed, + query.ct.last_direction ? "Original" : "Reply", + query.ct.liberal_mode, query.ct.state, + query.ct.max_ack_window, query.ct.retransmission_limit, + query.ct.last_index, query.ct.last_seq, + query.ct.last_ack, query.ct.last_window, + query.ct.last_end); + printf(" Original Dir:\n" + " scale: %u, fin: %u, ack seen: %u\n" + " unacked data: %u\n Sent end: %u," + " Reply end: %u, Max win: %u, Max ACK: %u\n", + query.ct.original_dir.scale, + query.ct.original_dir.close_initiated, + query.ct.original_dir.last_ack_seen, + query.ct.original_dir.data_unacked, + query.ct.original_dir.sent_end, + query.ct.original_dir.reply_end, + query.ct.original_dir.max_win, + query.ct.original_dir.max_ack); + printf(" Reply Dir:\n" + " scale: %u, fin: %u, ack seen: %u\n" + " unacked data: %u\n Sent end: %u," + " Reply end: %u, Max win: %u, Max ACK: %u\n", + query.ct.reply_dir.scale, + query.ct.reply_dir.close_initiated, + query.ct.reply_dir.last_ack_seen, + query.ct.reply_dir.data_unacked, + query.ct.reply_dir.sent_end, + query.ct.reply_dir.reply_end, + query.ct.reply_dir.max_win, + query.ct.reply_dir.max_ack); + break; + default: + fprintf(stderr, + "Indirect action %u (type: %d) on port %u doesn't support query\n", + id, pia->type, port_id); + break; + } + return 0; +} + +static struct port_flow_tunnel * +port_flow_tunnel_offload_cmd_prep(portid_t port_id, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + const struct tunnel_ops *tunnel_ops) +{ + int ret; + struct rte_port *port; + struct port_flow_tunnel *pft; + struct rte_flow_error error; + + port = &ports[port_id]; + pft = port_flow_locate_tunnel_id(port, tunnel_ops->id); + if (!pft) { + fprintf(stderr, "failed to locate port flow tunnel #%u\n", + tunnel_ops->id); + return NULL; + } + if (tunnel_ops->actions) { + uint32_t num_actions; + const struct rte_flow_action *aptr; + + ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel, + &pft->pmd_actions, + &pft->num_pmd_actions, + &error); + if (ret) { + port_flow_complain(&error); + return NULL; + } + for (aptr = actions, num_actions = 1; + aptr->type != RTE_FLOW_ACTION_TYPE_END; + aptr++, num_actions++); + pft->actions = malloc( + (num_actions + pft->num_pmd_actions) * + sizeof(actions[0])); + if (!pft->actions) { + rte_flow_tunnel_action_decap_release( + port_id, 
pft->actions, + pft->num_pmd_actions, &error); + return NULL; + } + rte_memcpy(pft->actions, pft->pmd_actions, + pft->num_pmd_actions * sizeof(actions[0])); + rte_memcpy(pft->actions + pft->num_pmd_actions, actions, + num_actions * sizeof(actions[0])); + } + if (tunnel_ops->items) { + uint32_t num_items; + const struct rte_flow_item *iptr; + + ret = rte_flow_tunnel_match(port_id, &pft->tunnel, + &pft->pmd_items, + &pft->num_pmd_items, + &error); + if (ret) { + port_flow_complain(&error); + return NULL; + } + for (iptr = pattern, num_items = 1; + iptr->type != RTE_FLOW_ITEM_TYPE_END; + iptr++, num_items++); + pft->items = malloc((num_items + pft->num_pmd_items) * + sizeof(pattern[0])); + if (!pft->items) { + rte_flow_tunnel_item_release( + port_id, pft->pmd_items, + pft->num_pmd_items, &error); + return NULL; + } + rte_memcpy(pft->items, pft->pmd_items, + pft->num_pmd_items * sizeof(pattern[0])); + rte_memcpy(pft->items + pft->num_pmd_items, pattern, + num_items * sizeof(pattern[0])); + } + + return pft; +} + +static void +port_flow_tunnel_offload_cmd_release(portid_t port_id, + const struct tunnel_ops *tunnel_ops, + struct port_flow_tunnel *pft) +{ + struct rte_flow_error error; + + if (tunnel_ops->actions) { + free(pft->actions); + rte_flow_tunnel_action_decap_release( + port_id, pft->pmd_actions, + pft->num_pmd_actions, &error); + pft->actions = NULL; + pft->pmd_actions = NULL; + } + if (tunnel_ops->items) { + free(pft->items); + rte_flow_tunnel_item_release(port_id, pft->pmd_items, + pft->num_pmd_items, + &error); + pft->items = NULL; + pft->pmd_items = NULL; + } +} + +/** Add port meter policy */ +int +port_meter_policy_add(portid_t port_id, uint32_t policy_id, + const struct rte_flow_action *actions) +{ + struct rte_mtr_error error; + const struct rte_flow_action *act = actions; + const struct rte_flow_action *start; + struct rte_mtr_meter_policy_params policy; + uint32_t i = 0, act_n; + int ret; + + for (i = 0; i < RTE_COLORS; i++) { + for (act_n = 0, start = act; + act->type != RTE_FLOW_ACTION_TYPE_END; act++) + act_n++; + if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) + policy.actions[i] = start; + else + policy.actions[i] = NULL; + act++; + } + ret = rte_mtr_meter_policy_add(port_id, + policy_id, + &policy, &error); + if (ret) + print_mtr_err_msg(&error); + return ret; } /** Validate flow rule. */ @@ -1347,56 +2006,132 @@ int port_flow_validate(portid_t port_id, const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, - const struct rte_flow_action *actions) + const struct rte_flow_action *actions, + const struct tunnel_ops *tunnel_ops) { struct rte_flow_error error; + struct port_flow_tunnel *pft = NULL; + struct rte_port *port; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + port = &ports[port_id]; + + if (attr->transfer) + port_id = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; /* Poisoning to make sure PMDs update it in case of error. 
*/ memset(&error, 0x11, sizeof(error)); + if (tunnel_ops->enabled) { + pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, + actions, tunnel_ops); + if (!pft) + return -ENOENT; + if (pft->items) + pattern = pft->items; + if (pft->actions) + actions = pft->actions; + } if (rte_flow_validate(port_id, attr, pattern, actions, &error)) return port_flow_complain(&error); + if (tunnel_ops->enabled) + port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); printf("Flow rule validated\n"); return 0; } +/** Return age action structure if exists, otherwise NULL. */ +static struct rte_flow_action_age * +age_action_get(const struct rte_flow_action *actions) +{ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_AGE: + return (struct rte_flow_action_age *) + (uintptr_t)actions->conf; + default: + break; + } + } + return NULL; +} + /** Create flow rule. */ int port_flow_create(portid_t port_id, const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, - const struct rte_flow_action *actions) + const struct rte_flow_action *actions, + const struct tunnel_ops *tunnel_ops) { struct rte_flow *flow; struct rte_port *port; struct port_flow *pf; - uint32_t id; + uint32_t id = 0; struct rte_flow_error error; + struct port_flow_tunnel *pft = NULL; + struct rte_flow_action_age *age = age_action_get(actions); + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; - /* Poisoning to make sure PMDs update it in case of error. */ - memset(&error, 0x22, sizeof(error)); - flow = rte_flow_create(port_id, attr, pattern, actions, &error); - if (!flow) - return port_flow_complain(&error); port = &ports[port_id]; + + if (attr->transfer) + port_id = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + if (port->flow_list) { if (port->flow_list->id == UINT32_MAX) { - printf("Highest rule ID is already assigned, delete" - " it first"); - rte_flow_destroy(port_id, flow, NULL); + fprintf(stderr, + "Highest rule ID is already assigned, delete it first"); return -ENOMEM; } id = port->flow_list->id + 1; - } else - id = 0; + } + if (tunnel_ops->enabled) { + pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern, + actions, tunnel_ops); + if (!pft) + return -ENOENT; + if (pft->items) + pattern = pft->items; + if (pft->actions) + actions = pft->actions; + } pf = port_flow_new(attr, pattern, actions, &error); - if (!pf) { - rte_flow_destroy(port_id, flow, NULL); + if (!pf) + return port_flow_complain(&error); + if (age) { + pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW; + age->context = &pf->age_type; + } + /* Poisoning to make sure PMDs update it in case of error. 
*/ + memset(&error, 0x22, sizeof(error)); + flow = rte_flow_create(port_id, attr, pattern, actions, &error); + if (!flow) { + if (tunnel_ops->enabled) + port_flow_tunnel_offload_cmd_release(port_id, + tunnel_ops, pft); + free(pf); return port_flow_complain(&error); } pf->next = port->flow_list; pf->id = id; pf->flow = flow; port->flow_list = pf; + if (tunnel_ops->enabled) + port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft); printf("Flow rule #%u created\n", pf->id); return 0; } @@ -1419,6 +2154,7 @@ port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) uint32_t i; for (i = 0; i != n; ++i) { + portid_t port_id_eff = port_id; struct rte_flow_error error; struct port_flow *pf = *tmp; @@ -1429,7 +2165,15 @@ port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) * of error. */ memset(&error, 0x33, sizeof(error)); - if (rte_flow_destroy(port_id, pf->flow, &error)) { + + if (pf->rule.attr->transfer) + port_id_eff = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id_eff, ENABLED_WARN) || + port_id_eff == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + if (rte_flow_destroy(port_id_eff, pf->flow, &error)) { ret = port_flow_complain(&error); continue; } @@ -1453,15 +2197,21 @@ port_flow_flush(portid_t port_id) struct rte_port *port; int ret = 0; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + port = &ports[port_id]; + + if (port->flow_list == NULL) + return ret; + /* Poisoning to make sure PMDs update it in case of error. */ memset(&error, 0x44, sizeof(error)); if (rte_flow_flush(port_id, &error)) { - ret = port_flow_complain(&error); - if (port_id_is_invalid(port_id, DISABLED_WARN) || - port_id == (portid_t)RTE_PORT_ALL) - return ret; + port_flow_complain(&error); } - port = &ports[port_id]; + while (port->flow_list) { struct port_flow *pf = port->flow_list->next; @@ -1471,26 +2221,58 @@ port_flow_flush(portid_t port_id) return ret; } -/** Dump all flow rules. */ +/** Dump flow rules. 
*/ int -port_flow_dump(portid_t port_id, const char *file_name) +port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id, + const char *file_name) { int ret = 0; FILE *file = stdout; struct rte_flow_error error; + struct rte_port *port; + struct port_flow *pflow; + struct rte_flow *tmpFlow = NULL; + bool found = false; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + + if (!dump_all) { + port = &ports[port_id]; + pflow = port->flow_list; + while (pflow) { + if (rule_id != pflow->id) { + pflow = pflow->next; + } else { + tmpFlow = pflow->flow; + if (tmpFlow) + found = true; + break; + } + } + if (found == false) { + fprintf(stderr, "Failed to dump to flow %d\n", rule_id); + return -EINVAL; + } + } if (file_name && strlen(file_name)) { file = fopen(file_name, "w"); if (!file) { - printf("Failed to create file %s: %s\n", file_name, - strerror(errno)); + fprintf(stderr, "Failed to create file %s: %s\n", + file_name, strerror(errno)); return -errno; } } - ret = rte_flow_dev_dump(port_id, file, &error); + + if (!dump_all) + ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error); + else + ret = rte_flow_dev_dump(port_id, NULL, file, &error); if (ret) { port_flow_complain(&error); - printf("Failed to dump flow: %s\n", strerror(-ret)); + fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret)); } else printf("Flow dump finished\n"); if (file_name && strlen(file_name)) @@ -1509,6 +2291,8 @@ port_flow_query(portid_t port_id, uint32_t rule, const char *name; union { struct rte_flow_query_count count; + struct rte_flow_action_rss rss_conf; + struct rte_flow_query_age age; } query; int ret; @@ -1520,9 +2304,17 @@ port_flow_query(portid_t port_id, uint32_t rule, if (pf->id == rule) break; if (!pf) { - printf("Flow rule #%u not found\n", rule); + fprintf(stderr, "Flow rule #%u not found\n", rule); return -ENOENT; } + + if (pf->rule.attr->transfer) + port_id = port->flow_transfer_proxy; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, &name, sizeof(name), (void *)(uintptr_t)action->type, &error); @@ -1530,9 +2322,11 @@ port_flow_query(portid_t port_id, uint32_t rule, return port_flow_complain(&error); switch (action->type) { case RTE_FLOW_ACTION_TYPE_COUNT: + case RTE_FLOW_ACTION_TYPE_RSS: + case RTE_FLOW_ACTION_TYPE_AGE: break; default: - printf("Cannot query action type %d (%s)\n", + fprintf(stderr, "Cannot query action type %d (%s)\n", action->type, name); return -ENOTSUP; } @@ -1554,17 +2348,109 @@ port_flow_query(portid_t port_id, uint32_t rule, query.count.hits, query.count.bytes); break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss_config_display(&query.rss_conf); + break; + case RTE_FLOW_ACTION_TYPE_AGE: + printf("%s:\n" + " aged: %u\n" + " sec_since_last_hit_valid: %u\n" + " sec_since_last_hit: %" PRIu32 "\n", + name, + query.age.aged, + query.age.sec_since_last_hit_valid, + query.age.sec_since_last_hit); + break; default: - printf("Cannot display result for action type %d (%s)\n", - action->type, name); + fprintf(stderr, + "Cannot display result for action type %d (%s)\n", + action->type, name); break; } return 0; } +/** List simply and destroy all aged flows. 
*/ +void +port_flow_aged(portid_t port_id, uint8_t destroy) +{ + void **contexts; + int nb_context, total = 0, idx; + struct rte_flow_error error; + enum age_action_context_type *type; + union { + struct port_flow *pf; + struct port_indirect_action *pia; + } ctx; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return; + total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); + printf("Port %u total aged flows: %d\n", port_id, total); + if (total < 0) { + port_flow_complain(&error); + return; + } + if (total == 0) + return; + contexts = malloc(sizeof(void *) * total); + if (contexts == NULL) { + fprintf(stderr, "Cannot allocate contexts for aged flow\n"); + return; + } + printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type"); + nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); + if (nb_context != total) { + fprintf(stderr, + "Port:%d get aged flows count(%d) != total(%d)\n", + port_id, nb_context, total); + free(contexts); + return; + } + total = 0; + for (idx = 0; idx < nb_context; idx++) { + if (!contexts[idx]) { + fprintf(stderr, "Error: get Null context in port %u\n", + port_id); + continue; + } + type = (enum age_action_context_type *)contexts[idx]; + switch (*type) { + case ACTION_AGE_CONTEXT_TYPE_FLOW: + ctx.pf = container_of(type, struct port_flow, age_type); + printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 + "\t%c%c%c\t\n", + "Flow", + ctx.pf->id, + ctx.pf->rule.attr->group, + ctx.pf->rule.attr->priority, + ctx.pf->rule.attr->ingress ? 'i' : '-', + ctx.pf->rule.attr->egress ? 'e' : '-', + ctx.pf->rule.attr->transfer ? 't' : '-'); + if (destroy && !port_flow_destroy(port_id, 1, + &ctx.pf->id)) + total++; + break; + case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: + ctx.pia = container_of(type, + struct port_indirect_action, age_type); + printf("%-20s\t%" PRIu32 "\n", "Indirect action", + ctx.pia->id); + break; + default: + fprintf(stderr, "Error: invalid context type %u\n", + port_id); + break; + } + } + printf("\n%d flows destroyed\n", total); + free(contexts); +} + /** List flow rules. */ void -port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) +port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group) { struct rte_port *port; struct port_flow *pf; @@ -1619,7 +2505,9 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) pf->rule.attr->egress ? 'e' : '-', pf->rule.attr->transfer ? 
't' : '-'); while (item->type != RTE_FLOW_ITEM_TYPE_END) { - if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, + if ((uint32_t)item->type > INT_MAX) + name = "PMD_INTERNAL"; + else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &name, sizeof(name), (void *)(uintptr_t)item->type, NULL) <= 0) @@ -1630,7 +2518,9 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) } printf("=>"); while (action->type != RTE_FLOW_ACTION_TYPE_END) { - if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, + if ((uint32_t)action->type > INT_MAX) + name = "PMD_INTERNAL"; + else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, &name, sizeof(name), (void *)(uintptr_t)action->type, NULL) <= 0) @@ -1667,7 +2557,8 @@ rx_queue_id_is_invalid(queueid_t rxq_id) { if (rxq_id < nb_rxq) return 0; - printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); + fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n", + rxq_id, nb_rxq); return 1; } @@ -1676,27 +2567,108 @@ tx_queue_id_is_invalid(queueid_t txq_id) { if (txq_id < nb_txq) return 0; - printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); + fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n", + txq_id, nb_txq); return 1; } static int -rx_desc_id_is_invalid(uint16_t rxdesc_id) +get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size) +{ + struct rte_port *port = &ports[port_id]; + struct rte_eth_rxq_info rx_qinfo; + int ret; + + ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo); + if (ret == 0) { + *ring_size = rx_qinfo.nb_desc; + return ret; + } + + if (ret != -ENOTSUP) + return ret; + /* + * If the rte_eth_rx_queue_info_get is not support for this PMD, + * ring_size stored in testpmd will be used for validity verification. + * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc + * being 0, it will use a default value provided by PMDs to setup this + * rxq. If the default value is 0, it will use the + * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq. + */ + if (port->nb_rx_desc[rxq_id]) + *ring_size = port->nb_rx_desc[rxq_id]; + else if (port->dev_info.default_rxportconf.ring_size) + *ring_size = port->dev_info.default_rxportconf.ring_size; + else + *ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; + return 0; +} + +static int +get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size) +{ + struct rte_port *port = &ports[port_id]; + struct rte_eth_txq_info tx_qinfo; + int ret; + + ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo); + if (ret == 0) { + *ring_size = tx_qinfo.nb_desc; + return ret; + } + + if (ret != -ENOTSUP) + return ret; + /* + * If the rte_eth_tx_queue_info_get is not support for this PMD, + * ring_size stored in testpmd will be used for validity verification. + * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc + * being 0, it will use a default value provided by PMDs to setup this + * txq. If the default value is 0, it will use the + * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq. 
+ */ + if (port->nb_tx_desc[txq_id]) + *ring_size = port->nb_tx_desc[txq_id]; + else if (port->dev_info.default_txportconf.ring_size) + *ring_size = port->dev_info.default_txportconf.ring_size; + else + *ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; + return 0; +} + +static int +rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id) { - if (rxdesc_id < nb_rxd) + uint16_t ring_size; + int ret; + + ret = get_rx_ring_size(port_id, rxq_id, &ring_size); + if (ret) + return 1; + + if (rxdesc_id < ring_size) return 0; - printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", - rxdesc_id, nb_rxd); + + fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n", + rxdesc_id, ring_size); return 1; } static int -tx_desc_id_is_invalid(uint16_t txdesc_id) +tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id) { - if (txdesc_id < nb_txd) + uint16_t ring_size; + int ret; + + ret = get_tx_ring_size(port_id, txq_id, &ring_size); + if (ret) + return 1; + + if (txdesc_id < ring_size) return 0; - printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", - txdesc_id, nb_txd); + + fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n", + txdesc_id, ring_size); return 1; } @@ -1710,9 +2682,9 @@ ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) port_id, q_id, ring_name); mz = rte_memzone_lookup(mz_name); if (mz == NULL) - printf("%s ring memory zoneof (port %d, queue %d) not" - "found (zone name = %s\n", - ring_name, port_id, q_id, mz_name); + fprintf(stderr, + "%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n", + ring_name, port_id, q_id, mz_name); return mz; } @@ -1817,11 +2789,7 @@ rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) { const struct rte_memzone *rx_mz; - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; - if (rx_queue_id_is_invalid(rxq_id)) - return; - if (rx_desc_id_is_invalid(rxd_id)) + if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id)) return; rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); if (rx_mz == NULL) @@ -1834,11 +2802,7 @@ tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) { const struct rte_memzone *tx_mz; - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; - if (tx_queue_id_is_invalid(txq_id)) - return; - if (tx_desc_id_is_invalid(txd_id)) + if (tx_desc_id_is_invalid(port_id, txq_id, txd_id)) return; tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); if (tx_mz == NULL) @@ -1879,10 +2843,17 @@ rxtx_config_display(void) struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; - uint16_t nb_rx_desc_tmp; - uint16_t nb_tx_desc_tmp; struct rte_eth_rxq_info rx_qinfo; struct rte_eth_txq_info tx_qinfo; + uint16_t rx_free_thresh_tmp; + uint16_t tx_free_thresh_tmp; + uint16_t tx_rs_thresh_tmp; + uint16_t nb_rx_desc_tmp; + uint16_t nb_tx_desc_tmp; + uint64_t offloads_tmp; + uint8_t pthresh_tmp; + uint8_t hthresh_tmp; + uint8_t wthresh_tmp; int32_t rc; /* per port config */ @@ -1896,41 +2867,69 @@ rxtx_config_display(void) /* per rx queue config only for first queue to be less verbose */ for (qid = 0; qid < 1; qid++) { rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); - if (rc) + if (rc) { nb_rx_desc_tmp = nb_rx_desc[qid]; - else + rx_free_thresh_tmp = + rx_conf[qid].rx_free_thresh; + pthresh_tmp = rx_conf[qid].rx_thresh.pthresh; + hthresh_tmp = rx_conf[qid].rx_thresh.hthresh; + wthresh_tmp = 
rx_conf[qid].rx_thresh.wthresh; + offloads_tmp = rx_conf[qid].offloads; + } else { nb_rx_desc_tmp = rx_qinfo.nb_desc; + rx_free_thresh_tmp = + rx_qinfo.conf.rx_free_thresh; + pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh; + hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh; + wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh; + offloads_tmp = rx_qinfo.conf.offloads; + } printf(" RX queue: %d\n", qid); printf(" RX desc=%d - RX free threshold=%d\n", - nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); + nb_rx_desc_tmp, rx_free_thresh_tmp); printf(" RX threshold registers: pthresh=%d hthresh=%d " " wthresh=%d\n", - rx_conf[qid].rx_thresh.pthresh, - rx_conf[qid].rx_thresh.hthresh, - rx_conf[qid].rx_thresh.wthresh); - printf(" RX Offloads=0x%"PRIx64"\n", - rx_conf[qid].offloads); + pthresh_tmp, hthresh_tmp, wthresh_tmp); + printf(" RX Offloads=0x%"PRIx64, offloads_tmp); + if (rx_conf->share_group > 0) + printf(" share_group=%u share_qid=%u", + rx_conf->share_group, + rx_conf->share_qid); + printf("\n"); } /* per tx queue config only for first queue to be less verbose */ for (qid = 0; qid < 1; qid++) { rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); - if (rc) + if (rc) { nb_tx_desc_tmp = nb_tx_desc[qid]; - else + tx_free_thresh_tmp = + tx_conf[qid].tx_free_thresh; + pthresh_tmp = tx_conf[qid].tx_thresh.pthresh; + hthresh_tmp = tx_conf[qid].tx_thresh.hthresh; + wthresh_tmp = tx_conf[qid].tx_thresh.wthresh; + offloads_tmp = tx_conf[qid].offloads; + tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh; + } else { nb_tx_desc_tmp = tx_qinfo.nb_desc; + tx_free_thresh_tmp = + tx_qinfo.conf.tx_free_thresh; + pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh; + hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh; + wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh; + offloads_tmp = tx_qinfo.conf.offloads; + tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh; + } printf(" TX queue: %d\n", qid); printf(" TX desc=%d - TX free threshold=%d\n", - nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh); + nb_tx_desc_tmp, tx_free_thresh_tmp); printf(" TX threshold registers: pthresh=%d hthresh=%d " " wthresh=%d\n", - tx_conf[qid].tx_thresh.pthresh, - tx_conf[qid].tx_thresh.hthresh, - tx_conf[qid].tx_thresh.wthresh); + pthresh_tmp, hthresh_tmp, wthresh_tmp); printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", - tx_conf[qid].offloads, tx_conf->tx_rs_thresh); + offloads_tmp, tx_rs_thresh_tmp); } } } @@ -1948,13 +2947,15 @@ port_rss_reta_info(portid_t port_id, ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); if (ret != 0) { - printf("Failed to get RSS RETA info, return code = %d\n", ret); + fprintf(stderr, + "Failed to get RSS RETA info, return code = %d\n", + ret); return; } for (i = 0; i < nb_entries; i++) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; if (!(reta_conf[idx].mask & (1ULL << shift))) continue; printf("RSS RETA configuration: hash index=%u, queue=%u\n", @@ -1963,7 +2964,7 @@ port_rss_reta_info(portid_t port_id, } /* - * Displays the RSS hash functions of a port, and, optionaly, the RSS hash + * Displays the RSS hash functions of a port, and, optionally, the RSS hash * key of the port. 
*/ void @@ -1989,7 +2990,8 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) dev_info.hash_key_size <= sizeof(rss_key)) hash_key_size = dev_info.hash_key_size; else { - printf("dev_info did not provide a valid hash key size\n"); + fprintf(stderr, + "dev_info did not provide a valid hash key size\n"); return; } @@ -2000,13 +3002,13 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) if (diag != 0) { switch (diag) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; default: - printf("operation failed - diag=%d\n", diag); + fprintf(stderr, "operation failed - diag=%d\n", diag); break; } return; @@ -2018,7 +3020,9 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) } printf("RSS functions:\n "); for (i = 0; rss_type_table[i].str; i++) { - if (rss_hf & rss_type_table[i].rss_type) + if (rss_type_table[i].rss_type == 0) + continue; + if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type) printf("%s ", rss_type_table[i].str); } printf("\n"); @@ -2032,14 +3036,14 @@ port_rss_hash_conf_show(portid_t port_id, int show_rss_key) void port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, - uint hash_key_len) + uint8_t hash_key_len) { struct rte_eth_rss_conf rss_conf; int diag; unsigned int i; rss_conf.rss_key = NULL; - rss_conf.rss_key_len = hash_key_len; + rss_conf.rss_key_len = 0; rss_conf.rss_hf = 0; for (i = 0; rss_type_table[i].str; i++) { if (!strcmp(rss_type_table[i].str, rss_type)) @@ -2048,6 +3052,7 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); if (diag == 0) { rss_conf.rss_key = hash_key; + rss_conf.rss_key_len = hash_key_len; diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); } if (diag == 0) @@ -2055,17 +3060,122 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, switch (diag) { case -ENODEV: - printf("port index %d invalid\n", port_id); + fprintf(stderr, "port index %d invalid\n", port_id); break; case -ENOTSUP: - printf("operation not supported by device\n"); + fprintf(stderr, "operation not supported by device\n"); break; default: - printf("operation failed - diag=%d\n", diag); + fprintf(stderr, "operation failed - diag=%d\n", diag); break; } } +/* + * Check whether a shared rxq scheduled on other lcores. + */ +static bool +fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, + portid_t src_port, queueid_t src_rxq, + uint32_t share_group, queueid_t share_rxq) +{ + streamid_t sm_id; + streamid_t nb_fs_per_lcore; + lcoreid_t nb_fc; + lcoreid_t lc_id; + struct fwd_stream *fs; + struct rte_port *port; + struct rte_eth_dev_info *dev_info; + struct rte_eth_rxconf *rxq_conf; + + nb_fc = cur_fwd_config.nb_fwd_lcores; + /* Check remaining cores. */ + for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) { + sm_id = fwd_lcores[lc_id]->stream_idx; + nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; + for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; + sm_id++) { + fs = fwd_streams[sm_id]; + port = &ports[fs->rx_port]; + dev_info = &port->dev_info; + rxq_conf = &port->rx_conf[fs->rx_queue]; + if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) + == 0 || rxq_conf->share_group == 0) + /* Not shared rxq. 
*/ + continue; + if (domain_id != port->dev_info.switch_info.domain_id) + continue; + if (rxq_conf->share_group != share_group) + continue; + if (rxq_conf->share_qid != share_rxq) + continue; + printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", + share_group, share_rxq); + printf(" lcore %hhu Port %hu queue %hu\n", + src_lc, src_port, src_rxq); + printf(" lcore %hhu Port %hu queue %hu\n", + lc_id, fs->rx_port, fs->rx_queue); + printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", + nb_rxq); + return true; + } + } + return false; +} + +/* + * Check shared rxq configuration. + * + * Shared group must not being scheduled on different core. + */ +bool +pkt_fwd_shared_rxq_check(void) +{ + streamid_t sm_id; + streamid_t nb_fs_per_lcore; + lcoreid_t nb_fc; + lcoreid_t lc_id; + struct fwd_stream *fs; + uint16_t domain_id; + struct rte_port *port; + struct rte_eth_dev_info *dev_info; + struct rte_eth_rxconf *rxq_conf; + + if (rxq_share == 0) + return true; + nb_fc = cur_fwd_config.nb_fwd_lcores; + /* + * Check streams on each core, make sure the same switch domain + + * group + queue doesn't get scheduled on other cores. + */ + for (lc_id = 0; lc_id < nb_fc; lc_id++) { + sm_id = fwd_lcores[lc_id]->stream_idx; + nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb; + for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore; + sm_id++) { + fs = fwd_streams[sm_id]; + /* Update lcore info stream being scheduled. */ + fs->lcore = fwd_lcores[lc_id]; + port = &ports[fs->rx_port]; + dev_info = &port->dev_info; + rxq_conf = &port->rx_conf[fs->rx_queue]; + if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) + == 0 || rxq_conf->share_group == 0) + /* Not shared rxq. */ + continue; + /* Check shared rxq not scheduled on remaining cores. */ + domain_id = port->dev_info.switch_info.domain_id; + if (fwd_stream_on_other_lcores(domain_id, lc_id, + fs->rx_port, + fs->rx_queue, + rxq_conf->share_group, + rxq_conf->share_qid)) + return false; + } + } + return true; +} + /* * Setup forwarding configuration for each logical core. */ @@ -2123,10 +3233,8 @@ fwd_topology_tx_port_get(portid_t rxp) if (rxp + 1 < cur_fwd_config.nb_fwd_ports) return rxp + 1; if (warning_once) { - printf("\nWarning! port-topology=paired" - " and odd forward ports number," - " the last port will pair with" - " itself.\n\n"); + fprintf(stderr, + "\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n"); warning_once = 0; } return rxp; @@ -2185,6 +3293,8 @@ rss_fwd_config_setup(void) queueid_t rxq; queueid_t nb_q; streamid_t sm_id; + int start; + int end; nb_q = nb_rxq; if (nb_q > nb_txq) @@ -2202,7 +3312,21 @@ rss_fwd_config_setup(void) init_fwd_streams(); setup_fwd_config_of_each_lcore(&cur_fwd_config); - rxp = 0; rxq = 0; + + if (proc_id > 0 && nb_q % num_procs != 0) + printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); + + /** + * In multi-process, All queues are allocated to different + * processes based on num_procs and proc_id. For example: + * if supports 4 queues(nb_q), 2 processes(num_procs), + * the 0~1 queue for primary process. + * the 2~3 queue for secondary process. 
+ */ + start = proc_id * nb_q / num_procs; + end = start + nb_q / num_procs; + rxp = 0; + rxq = start; for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { struct fwd_stream *fs; @@ -2219,7 +3343,24 @@ rss_fwd_config_setup(void) continue; rxp = 0; rxq++; + if (rxq >= end) + rxq = start; + } +} + +static uint16_t +get_fwd_port_total_tc_num(void) +{ + struct rte_eth_dcb_info dcb_info; + uint16_t total_tc_num = 0; + unsigned int i; + + for (i = 0; i < nb_fwd_ports; i++) { + (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info); + total_tc_num += dcb_info.nb_tcs; } + + return total_tc_num; } /** @@ -2241,12 +3382,43 @@ dcb_fwd_config_setup(void) lcoreid_t lc_id; uint16_t nb_rx_queue, nb_tx_queue; uint16_t i, j, k, sm_id = 0; + uint16_t total_tc_num; + struct rte_port *port; uint8_t tc = 0; + portid_t pid; + int ret; + + /* + * The fwd_config_setup() is called when the port is RTE_PORT_STARTED + * or RTE_PORT_STOPPED. + * + * Re-configure ports to get updated mapping between tc and queue in + * case the queue number of the port is changed. Skip for started ports + * since modifying queue number and calling dev_configure need to stop + * ports first. + */ + for (pid = 0; pid < nb_fwd_ports; pid++) { + if (port_is_started(pid) == 1) + continue; + + port = &ports[pid]; + ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq, + &port->dev_conf); + if (ret < 0) { + fprintf(stderr, + "Failed to re-configure port %d, ret = %d.\n", + pid, ret); + return; + } + } cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; cur_fwd_config.nb_fwd_ports = nb_fwd_ports; cur_fwd_config.nb_fwd_streams = (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); + total_tc_num = get_fwd_port_total_tc_num(); + if (cur_fwd_config.nb_fwd_lcores > total_tc_num) + cur_fwd_config.nb_fwd_lcores = total_tc_num; /* reinitialize forwarding streams */ init_fwd_streams(); @@ -2259,7 +3431,7 @@ dcb_fwd_config_setup(void) for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { fwd_lcores[lc_id]->stream_nb = 0; fwd_lcores[lc_id]->stream_idx = sm_id; - for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { + for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) { /* if the nb_queue is zero, means this tc is * not enabled on the POOL */ @@ -2365,75 +3537,38 @@ icmp_echo_config_setup(void) } } -#if defined RTE_LIBRTE_PMD_SOFTNIC -static void -softnic_fwd_config_setup(void) -{ - struct rte_port *port; - portid_t pid, softnic_portid; - queueid_t i; - uint8_t softnic_enable = 0; - - RTE_ETH_FOREACH_DEV(pid) { - port = &ports[pid]; - const char *driver = port->dev_info.driver_name; - - if (strcmp(driver, "net_softnic") == 0) { - softnic_portid = pid; - softnic_enable = 1; - break; - } - } - - if (softnic_enable == 0) { - printf("Softnic mode not configured(%s)!\n", __func__); - return; - } - - cur_fwd_config.nb_fwd_ports = 1; - cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; - - /* Re-initialize forwarding streams */ - init_fwd_streams(); - - /* - * In the softnic forwarding test, the number of forwarding cores - * is set to one and remaining are used for softnic packet processing. 
- */ - cur_fwd_config.nb_fwd_lcores = 1; - setup_fwd_config_of_each_lcore(&cur_fwd_config); - - for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { - fwd_streams[i]->rx_port = softnic_portid; - fwd_streams[i]->rx_queue = i; - fwd_streams[i]->tx_port = softnic_portid; - fwd_streams[i]->tx_queue = i; - fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; - fwd_streams[i]->retry_enabled = retry_enabled; - } -} -#endif - void fwd_config_setup(void) { + struct rte_port *port; + portid_t pt_id; + unsigned int i; + cur_fwd_config.fwd_eng = cur_fwd_eng; if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { icmp_echo_config_setup(); return; } -#if defined RTE_LIBRTE_PMD_SOFTNIC - if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { - softnic_fwd_config_setup(); - return; - } -#endif - if ((nb_rxq > 1) && (nb_txq > 1)){ - if (dcb_config) + if (dcb_config) { + for (i = 0; i < nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + if (!port->dcb_flag) { + fprintf(stderr, + "In DCB mode, all forwarding ports must be configured in this mode.\n"); + return; + } + } + if (nb_fwd_lcores == 1) { + fprintf(stderr, + "In DCB mode,the nb forwarding cores should be larger than 1.\n"); + return; + } + dcb_fwd_config_setup(); - else + } else rss_fwd_config_setup(); } else @@ -2504,11 +3639,12 @@ set_fwd_eth_peer(portid_t port_id, char *peer_addr) { struct rte_ether_addr new_peer_addr; if (!rte_eth_dev_is_valid_port(port_id)) { - printf("Error: Invalid port number %i\n", port_id); + fprintf(stderr, "Error: Invalid port number %i\n", port_id); return; } if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { - printf("Error: Invalid ethernet address: %s\n", peer_addr); + fprintf(stderr, "Error: Invalid ethernet address: %s\n", + peer_addr); return; } peer_eth_addrs[port_id] = new_peer_addr; @@ -2526,14 +3662,13 @@ set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) for (i = 0; i < nb_lc; i++) { lcore_cpuid = lcorelist[i]; if (! rte_lcore_is_enabled(lcore_cpuid)) { - printf("lcore %u not enabled\n", lcore_cpuid); + fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid); return -1; } - if (lcore_cpuid == rte_get_master_lcore()) { - printf("lcore %u cannot be masked on for running " - "packet forwarding, which is the master lcore " - "and reserved for command line parsing only\n", - lcore_cpuid); + if (lcore_cpuid == rte_get_main_lcore()) { + fprintf(stderr, + "lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n", + lcore_cpuid); return -1; } if (record_now) @@ -2562,7 +3697,7 @@ set_fwd_lcores_mask(uint64_t lcoremask) unsigned int i; if (lcoremask == 0) { - printf("Invalid NULL mask of cores\n"); + fprintf(stderr, "Invalid NULL mask of cores\n"); return -1; } nb_lc = 0; @@ -2577,10 +3712,14 @@ set_fwd_lcores_mask(uint64_t lcoremask) void set_fwd_lcores_number(uint16_t nb_lc) { + if (test_done == 0) { + fprintf(stderr, "Please stop forwarding first\n"); + return; + } if (nb_lc > nb_cfg_lcores) { - printf("nb fwd cores %u > %u (max. number of configured " - "lcores) - ignored\n", - (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); + fprintf(stderr, + "nb fwd cores %u > %u (max. 
number of configured lcores) - ignored\n", + (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); return; } nb_fwd_lcores = (lcoreid_t) nb_lc; @@ -2731,7 +3870,7 @@ set_fwd_ports_mask(uint64_t portmask) unsigned int i; if (portmask == 0) { - printf("Invalid NULL mask of ports\n"); + fprintf(stderr, "Invalid NULL mask of ports\n"); return; } nb_pt = 0; @@ -2747,9 +3886,9 @@ void set_fwd_ports_number(uint16_t nb_pt) { if (nb_pt > nb_cfg_ports) { - printf("nb fwd ports %u > %u (number of configured " - "ports) - ignored\n", - (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); + fprintf(stderr, + "nb fwd ports %u > %u (number of configured ports) - ignored\n", + (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); return; } nb_fwd_ports = (portid_t) nb_pt; @@ -2777,9 +3916,9 @@ void set_nb_pkt_per_burst(uint16_t nb) { if (nb > MAX_PKT_BURST) { - printf("nb pkt per burst: %u > %u (maximum packet per burst) " - " ignored\n", - (unsigned int) nb, (unsigned int) MAX_PKT_BURST); + fprintf(stderr, + "nb pkt per burst: %u > %u (maximum packet per burst) ignored\n", + (unsigned int) nb, (unsigned int) MAX_PKT_BURST); return; } nb_pkt_per_burst = nb; @@ -2810,7 +3949,130 @@ set_tx_pkt_split(const char *name) return; } } - printf("unknown value: \"%s\"\n", name); + fprintf(stderr, "unknown value: \"%s\"\n", name); +} + +int +parse_fec_mode(const char *name, uint32_t *fec_capa) +{ + uint8_t i; + + for (i = 0; i < RTE_DIM(fec_mode_name); i++) { + if (strcmp(fec_mode_name[i].name, name) == 0) { + *fec_capa = + RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode); + return 0; + } + } + return -1; +} + +void +show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa) +{ + unsigned int i, j; + + printf("FEC capabilities:\n"); + + for (i = 0; i < num; i++) { + printf("%s : ", + rte_eth_link_speed_to_str(speed_fec_capa[i].speed)); + + for (j = 0; j < RTE_DIM(fec_mode_name); j++) { + if (RTE_ETH_FEC_MODE_TO_CAPA(j) & + speed_fec_capa[i].capa) + printf("%s ", fec_mode_name[j].name); + } + printf("\n"); + } +} + +void +show_rx_pkt_offsets(void) +{ + uint32_t i, n; + + n = rx_pkt_nb_offs; + printf("Number of offsets: %u\n", n); + if (n) { + printf("Segment offsets: "); + for (i = 0; i != n - 1; i++) + printf("%hu,", rx_pkt_seg_offsets[i]); + printf("%hu\n", rx_pkt_seg_lengths[i]); + } +} + +void +set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs) +{ + unsigned int i; + + if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) { + printf("nb segments per RX packets=%u >= " + "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs); + return; + } + + /* + * No extra check here, the segment length will be checked by PMD + * in the extended queue setup. 
+ */ + for (i = 0; i < nb_offs; i++) { + if (seg_offsets[i] >= UINT16_MAX) { + printf("offset[%u]=%u > UINT16_MAX - give up\n", + i, seg_offsets[i]); + return; + } + } + + for (i = 0; i < nb_offs; i++) + rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i]; + + rx_pkt_nb_offs = (uint8_t) nb_offs; +} + +void +show_rx_pkt_segments(void) +{ + uint32_t i, n; + + n = rx_pkt_nb_segs; + printf("Number of segments: %u\n", n); + if (n) { + printf("Segment sizes: "); + for (i = 0; i != n - 1; i++) + printf("%hu,", rx_pkt_seg_lengths[i]); + printf("%hu\n", rx_pkt_seg_lengths[i]); + } +} + +void +set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) +{ + unsigned int i; + + if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) { + printf("nb segments per RX packets=%u >= " + "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs); + return; + } + + /* + * No extra check here, the segment length will be checked by PMD + * in the extended queue setup. + */ + for (i = 0; i < nb_segs; i++) { + if (seg_lengths[i] >= UINT16_MAX) { + printf("length[%u]=%u > UINT16_MAX - give up\n", + i, seg_lengths[i]); + return; + } + } + + for (i = 0; i < nb_segs; i++) + rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; + + rx_pkt_nb_segs = (uint8_t) nb_segs; } void @@ -2830,36 +4092,79 @@ show_tx_pkt_segments(void) printf("Split packet: %s\n", split); } +static bool +nb_segs_is_invalid(unsigned int nb_segs) +{ + uint16_t ring_size; + uint16_t queue_id; + uint16_t port_id; + int ret; + + RTE_ETH_FOREACH_DEV(port_id) { + for (queue_id = 0; queue_id < nb_txq; queue_id++) { + ret = get_tx_ring_size(port_id, queue_id, &ring_size); + if (ret) { + /* Port may not be initialized yet, can't say + * the port is invalid in this stage. + */ + continue; + } + if (ring_size < nb_segs) { + printf("nb segments per TX packets=%u >= TX " + "queue(%u) ring_size=%u - txpkts ignored\n", + nb_segs, queue_id, ring_size); + return true; + } + } + } + + return false; +} + void -set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) +set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs) { uint16_t tx_pkt_len; - unsigned i; + unsigned int i; + + /* + * For single segment settings failed check is ignored. + * It is a very basic capability to send the single segment + * packets, suppose it is always supported. + */ + if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) { + fprintf(stderr, + "Tx segment size(%u) is not supported - txpkts ignored\n", + nb_segs); + return; + } - if (nb_segs >= (unsigned) nb_txd) { - printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", - nb_segs, (unsigned int) nb_txd); + if (nb_segs > RTE_MAX_SEGS_PER_PKT) { + fprintf(stderr, + "Tx segment size(%u) is bigger than max number of segment(%u)\n", + nb_segs, RTE_MAX_SEGS_PER_PKT); return; } /* * Check that each segment length is greater or equal than - * the mbuf data sise. + * the mbuf data size. * Check also that the total packet length is greater or equal than the * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + * 20 + 8). 
*/ tx_pkt_len = 0; for (i = 0; i < nb_segs; i++) { - if (seg_lengths[i] > (unsigned) mbuf_data_size) { - printf("length[%u]=%u > mbuf_data_size=%u - give up\n", - i, seg_lengths[i], (unsigned) mbuf_data_size); + if (seg_lengths[i] > mbuf_data_size[0]) { + fprintf(stderr, + "length[%u]=%u > mbuf_data_size=%u - give up\n", + i, seg_lengths[i], mbuf_data_size[0]); return; } tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); } if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { - printf("total packet length=%u < %d - give up\n", + fprintf(stderr, "total packet length=%u < %d - give up\n", (unsigned) tx_pkt_len, (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); return; @@ -2872,22 +4177,37 @@ set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) tx_pkt_nb_segs = (uint8_t) nb_segs; } +void +show_tx_pkt_times(void) +{ + printf("Interburst gap: %u\n", tx_pkt_times_inter); + printf("Intraburst gap: %u\n", tx_pkt_times_intra); +} + +void +set_tx_pkt_times(unsigned int *tx_times) +{ + tx_pkt_times_inter = tx_times[0]; + tx_pkt_times_intra = tx_times[1]; +} + void setup_gro(const char *onoff, portid_t port_id) { if (!rte_eth_dev_is_valid_port(port_id)) { - printf("invalid port id %u\n", port_id); + fprintf(stderr, "invalid port id %u\n", port_id); return; } if (test_done == 0) { - printf("Before enable/disable GRO," - " please stop forwarding first\n"); + fprintf(stderr, + "Before enable/disable GRO, please stop forwarding first\n"); return; } if (strcmp(onoff, "on") == 0) { if (gro_ports[port_id].enable != 0) { - printf("Port %u has enabled GRO. Please" - " disable GRO first\n", port_id); + fprintf(stderr, + "Port %u has enabled GRO. Please disable GRO first\n", + port_id); return; } if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { @@ -2900,7 +4220,7 @@ setup_gro(const char *onoff, portid_t port_id) gro_ports[port_id].enable = 1; } else { if (gro_ports[port_id].enable == 0) { - printf("Port %u has disabled GRO\n", port_id); + fprintf(stderr, "Port %u has disabled GRO\n", port_id); return; } gro_ports[port_id].enable = 0; @@ -2911,18 +4231,16 @@ void setup_gro_flush_cycles(uint8_t cycles) { if (test_done == 0) { - printf("Before change flush interval for GRO," - " please stop forwarding first.\n"); + fprintf(stderr, + "Before change flush interval for GRO, please stop forwarding first.\n"); return; } if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < GRO_DEFAULT_FLUSH_CYCLES) { - printf("The flushing cycle be in the range" - " of 1 to %u. Revert to the default" - " value %u.\n", - GRO_MAX_FLUSH_CYCLES, - GRO_DEFAULT_FLUSH_CYCLES); + fprintf(stderr, + "The flushing cycle be in the range of 1 to %u. 
Revert to the default value %u.\n", + GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES); cycles = GRO_DEFAULT_FLUSH_CYCLES; } @@ -2938,7 +4256,7 @@ show_gro(portid_t port_id) param = &gro_ports[port_id].param; if (!rte_eth_dev_is_valid_port(port_id)) { - printf("Invalid port id %u.\n", port_id); + fprintf(stderr, "Invalid port id %u.\n", port_id); return; } if (gro_ports[port_id].enable) { @@ -2959,20 +4277,20 @@ void setup_gso(const char *mode, portid_t port_id) { if (!rte_eth_dev_is_valid_port(port_id)) { - printf("invalid port id %u\n", port_id); + fprintf(stderr, "invalid port id %u\n", port_id); return; } if (strcmp(mode, "on") == 0) { if (test_done == 0) { - printf("before enabling GSO," - " please stop forwarding first\n"); + fprintf(stderr, + "before enabling GSO, please stop forwarding first\n"); return; } gso_ports[port_id].enable = 1; } else if (strcmp(mode, "off") == 0) { if (test_done == 0) { - printf("before disabling GSO," - " please stop forwarding first\n"); + fprintf(stderr, + "before disabling GSO, please stop forwarding first\n"); return; } gso_ports[port_id].enable = 0; @@ -3042,7 +4360,7 @@ set_pkt_forwarding_mode(const char *fwd_mode_name) } i++; } - printf("Invalid %s packet forwarding mode\n", fwd_mode_name); + fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name); } void @@ -3176,17 +4494,20 @@ vlan_extend_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; } else { - vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; + vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); - if (diag < 0) - printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " - "diag=%d\n", port_id, on, diag); + if (diag < 0) { + fprintf(stderr, + "rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n", + port_id, on, diag); + return; + } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } @@ -3203,17 +4524,20 @@ rx_vlan_strip_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } else { - vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); - if (diag < 0) - printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " - "diag=%d\n", port_id, on, diag); + if (diag < 0) { + fprintf(stderr, + "%s(port_pi=%d, on=%d) failed diag=%d\n", + __func__, port_id, on, diag); + return; + } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } @@ -3227,8 +4551,9 @@ rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); if (diag < 0) - printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " - "diag=%d\n", port_id, queue_id, on, diag); + fprintf(stderr, + "%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n", + __func__, port_id, queue_id, on, diag); } void @@ -3244,17 +4569,20 @@ 
rx_vlan_filter_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; } else { - vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); - if (diag < 0) - printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " - "diag=%d\n", port_id, on, diag); + if (diag < 0) { + fprintf(stderr, + "%s(port_pi=%d, on=%d) failed diag=%d\n", + __func__, port_id, on, diag); + return; + } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } @@ -3271,17 +4599,19 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on) vlan_offload = rte_eth_dev_get_vlan_offload(port_id); if (on) { - vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; - port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; + vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD; + port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; } else { - vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; - port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; + vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD; + port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); - if (diag < 0) - printf("%s(port_pi=%d, on=%d) failed " - "diag=%d\n", __func__, port_id, on, diag); + if (diag < 0) { + fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n", + __func__, port_id, on, diag); + return; + } ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } @@ -3297,9 +4627,9 @@ rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); if (diag == 0) return 0; - printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " - "diag=%d\n", - port_id, vlan_id, on, diag); + fprintf(stderr, + "rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n", + port_id, vlan_id, on, diag); return -1; } @@ -3328,9 +4658,9 @@ vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) if (diag == 0) return; - printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " - "diag=%d\n", - port_id, vlan_type, tp_id, diag); + fprintf(stderr, + "tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n", + port_id, vlan_type, tp_id, diag); } void @@ -3339,14 +4669,12 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id) struct rte_eth_dev_info dev_info; int ret; - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; if (vlan_id_is_invalid(vlan_id)) return; if (ports[port_id].dev_conf.txmode.offloads & - DEV_TX_OFFLOAD_QINQ_INSERT) { - printf("Error, as QinQ has been enabled.\n"); + RTE_ETH_TX_OFFLOAD_QINQ_INSERT) { + fprintf(stderr, "Error, as QinQ has been enabled.\n"); return; } @@ -3354,14 +4682,15 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id) if (ret != 0) return; - if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { - printf("Error: vlan insert is not supported by port %d\n", + if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) { + fprintf(stderr, + "Error: vlan insert is not supported by port %d\n", port_id); return; } tx_vlan_reset(port_id); - ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; + ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 
ports[port_id].tx_vlan_id = vlan_id; } @@ -3371,8 +4700,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) struct rte_eth_dev_info dev_info; int ret; - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; if (vlan_id_is_invalid(vlan_id)) return; if (vlan_id_is_invalid(vlan_id_outer)) @@ -3382,15 +4709,16 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) if (ret != 0) return; - if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { - printf("Error: qinq insert not supported by port %d\n", + if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) { + fprintf(stderr, + "Error: qinq insert not supported by port %d\n", port_id); return; } tx_vlan_reset(port_id); - ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT); + ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT); ports[port_id].tx_vlan_id = vlan_id; ports[port_id].tx_vlan_id_outer = vlan_id_outer; } @@ -3398,11 +4726,9 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) void tx_vlan_reset(portid_t port_id) { - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return; ports[port_id].dev_conf.txmode.offloads &= - ~(DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_QINQ_INSERT); + ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT); ports[port_id].tx_vlan_id = 0; ports[port_id].tx_vlan_id_outer = 0; } @@ -3419,8 +4745,7 @@ tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) { - uint16_t i; - uint8_t existing_mapping_found = 0; + int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; @@ -3429,41 +4754,26 @@ set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) return; if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { - printf("map_value not in required range 0..%d\n", - RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + fprintf(stderr, "map_value not in required range 0..%d\n", + RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); return; } - if (!is_rx) { /*then tx*/ - for (i = 0; i < nb_tx_queue_stats_mappings; i++) { - if ((tx_queue_stats_mappings[i].port_id == port_id) && - (tx_queue_stats_mappings[i].queue_id == queue_id)) { - tx_queue_stats_mappings[i].stats_counter_id = map_value; - existing_mapping_found = 1; - break; - } - } - if (!existing_mapping_found) { /* A new additional mapping... */ - tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; - tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; - tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; - nb_tx_queue_stats_mappings++; - } - } - else { /*rx*/ - for (i = 0; i < nb_rx_queue_stats_mappings; i++) { - if ((rx_queue_stats_mappings[i].port_id == port_id) && - (rx_queue_stats_mappings[i].queue_id == queue_id)) { - rx_queue_stats_mappings[i].stats_counter_id = map_value; - existing_mapping_found = 1; - break; - } + if (!is_rx) { /* tx */ + ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, + map_value); + if (ret) { + fprintf(stderr, + "failed to set tx queue stats mapping.\n"); + return; } - if (!existing_mapping_found) { /* A new additional mapping... 
*/ - rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; - rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; - rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; - nb_rx_queue_stats_mappings++; + } else { /* rx */ + ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id, + map_value); + if (ret) { + fprintf(stderr, + "failed to set rx queue stats mapping.\n"); + return; } } } @@ -3474,6 +4784,63 @@ set_xstats_hide_zero(uint8_t on_off) xstats_hide_zero = on_off; } +void +set_record_core_cycles(uint8_t on_off) +{ + record_core_cycles = on_off; +} + +void +set_record_burst_stats(uint8_t on_off) +{ + record_burst_stats = on_off; +} + +static char* +flowtype_to_str(uint16_t flow_type) +{ + struct flow_type_info { + char str[32]; + uint16_t ftype; + }; + + uint8_t i; + static struct flow_type_info flowtype_str_table[] = { + {"raw", RTE_ETH_FLOW_RAW}, + {"ipv4", RTE_ETH_FLOW_IPV4}, + {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, + {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, + {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, + {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, + {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, + {"ipv6", RTE_ETH_FLOW_IPV6}, + {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, + {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, + {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, + {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, + {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, + {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, + {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX}, + {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX}, + {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX}, + {"port", RTE_ETH_FLOW_PORT}, + {"vxlan", RTE_ETH_FLOW_VXLAN}, + {"geneve", RTE_ETH_FLOW_GENEVE}, + {"nvgre", RTE_ETH_FLOW_NVGRE}, + {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, + {"gtpu", RTE_ETH_FLOW_GTPU}, + }; + + for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { + if (flowtype_str_table[i].ftype == flow_type) + return flowtype_str_table[i].str; + } + + return NULL; +} + +#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE) + static inline void print_fdir_mask(struct rte_eth_fdir_masks *mask) { @@ -3533,45 +4900,6 @@ print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) printf("\n"); } -static char * -flowtype_to_str(uint16_t flow_type) -{ - struct flow_type_info { - char str[32]; - uint16_t ftype; - }; - - uint8_t i; - static struct flow_type_info flowtype_str_table[] = { - {"raw", RTE_ETH_FLOW_RAW}, - {"ipv4", RTE_ETH_FLOW_IPV4}, - {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, - {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, - {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, - {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, - {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, - {"ipv6", RTE_ETH_FLOW_IPV6}, - {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, - {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, - {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, - {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, - {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, - {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, - {"port", RTE_ETH_FLOW_PORT}, - {"vxlan", RTE_ETH_FLOW_VXLAN}, - {"geneve", RTE_ETH_FLOW_GENEVE}, - {"nvgre", RTE_ETH_FLOW_NVGRE}, - {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, - }; - - for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { - if (flowtype_str_table[i].ftype == flow_type) - return flowtype_str_table[i].str; - } - - return NULL; -} - static inline void print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) { @@ -3607,30 +4935,56 @@ print_fdir_flow_type(uint32_t 
flow_types_mask) printf("\n"); } +static int +get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info, + struct rte_eth_fdir_stats *fdir_stat) +{ + int ret = -ENOTSUP; + +#ifdef RTE_NET_I40E + if (ret == -ENOTSUP) { + ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info); + if (!ret) + ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat); + } +#endif +#ifdef RTE_NET_IXGBE + if (ret == -ENOTSUP) { + ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info); + if (!ret) + ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat); + } +#endif + switch (ret) { + case 0: + break; + case -ENOTSUP: + fprintf(stderr, "\n FDIR is not supported on port %-2d\n", + port_id); + break; + default: + fprintf(stderr, "programming error: (%s)\n", strerror(-ret)); + break; + } + return ret; +} + void fdir_get_infos(portid_t port_id) { struct rte_eth_fdir_stats fdir_stat; struct rte_eth_fdir_info fdir_info; - int ret; static const char *fdir_stats_border = "########################"; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; - ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); - if (ret < 0) { - printf("\n FDIR is not supported on port %-2d\n", - port_id); - return; - } memset(&fdir_info, 0, sizeof(fdir_info)); - rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, - RTE_ETH_FILTER_INFO, &fdir_info); memset(&fdir_stat, 0, sizeof(fdir_stat)); - rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, - RTE_ETH_FILTER_STATS, &fdir_stat); + if (get_fdir_info(port_id, &fdir_info, &fdir_stat)) + return; + printf("\n %s FDIR infos for port %-2d %s\n", fdir_stats_border, port_id, fdir_stats_border); printf(" MODE: "); @@ -3683,6 +5037,8 @@ fdir_get_infos(portid_t port_id) fdir_stats_border, fdir_stats_border); } +#endif /* RTE_NET_I40E || RTE_NET_IXGBE */ + void fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) { @@ -3703,8 +5059,9 @@ fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) idx = flex_conf->nb_flexmasks; flex_conf->nb_flexmasks++; } else { - printf("The flex mask table is full. Can not set flex" - " mask for flow_type(%u).", cfg->flow_type); + fprintf(stderr, + "The flex mask table is full. Can not set flex mask for flow_type(%u).", + cfg->flow_type); return; } } @@ -3733,8 +5090,9 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) idx = flex_conf->nb_payloads; flex_conf->nb_payloads++; } else { - printf("The flex payload table is full. Can not set" - " flex payload for type(%u).", cfg->type); + fprintf(stderr, + "The flex payload table is full. Can not set flex payload for type(%u).", + cfg->type); return; } } @@ -3747,7 +5105,7 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) { -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE int diag; if (is_rx) @@ -3757,12 +5115,13 @@ set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) if (diag == 0) return; - printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", - is_rx ? "rx" : "tx", port_id, diag); + fprintf(stderr, + "rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", + is_rx ? "rx" : "tx", port_id, diag); return; #endif - printf("VF %s setting not supported for port %d\n", - is_rx ? "Rx" : "Tx", port_id); + fprintf(stderr, "VF %s setting not supported for port %d\n", + is_rx ? 
"Rx" : "Tx", port_id); RTE_SET_USED(vf); RTE_SET_USED(on); } @@ -3779,15 +5138,18 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) ret = eth_link_get_nowait_print_err(port_id, &link); if (ret < 0) return 1; - if (rate > link.link_speed) { - printf("Invalid rate value:%u bigger than link speed: %u\n", + if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN && + rate > link.link_speed) { + fprintf(stderr, + "Invalid rate value:%u bigger than link speed: %u\n", rate, link.link_speed); return 1; } diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); if (diag == 0) return diag; - printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", + fprintf(stderr, + "rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", port_id, diag); return diag; } @@ -3801,20 +5163,21 @@ set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) RTE_SET_USED(rate); RTE_SET_USED(q_msk); -#ifdef RTE_LIBRTE_IXGBE_PMD +#ifdef RTE_NET_IXGBE if (diag == -ENOTSUP) diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk); #endif -#ifdef RTE_LIBRTE_BNXT_PMD +#ifdef RTE_NET_BNXT if (diag == -ENOTSUP) diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); #endif if (diag == 0) return diag; - printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", - port_id, diag); + fprintf(stderr, + "%s for port_id=%d failed diag=%d\n", + __func__, port_id, diag); return diag; } @@ -3861,8 +5224,9 @@ mcast_addr_pool_extend(struct rte_port *port) mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, mc_pool_size); if (mc_pool == NULL) { - printf("allocation of pool of %u multicast addresses failed\n", - port->mc_addr_nb + MCAST_POOL_INC); + fprintf(stderr, + "allocation of pool of %u multicast addresses failed\n", + port->mc_addr_nb + MCAST_POOL_INC); return -ENOMEM; } @@ -3885,7 +5249,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) { port->mc_addr_nb--; if (addr_idx == port->mc_addr_nb) { - /* No need to recompact the set of multicast addressses. */ + /* No need to recompact the set of multicast addresses. */ if (port->mc_addr_nb == 0) { /* free the pool of multicast addresses. */ free(port->mc_addr_pool); @@ -3908,7 +5272,8 @@ eth_port_multicast_addr_list_set(portid_t port_id) diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, port->mc_addr_nb); if (diag < 0) - printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", + fprintf(stderr, + "rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. 
diag=%d\n", port_id, port->mc_addr_nb, diag); return diag; @@ -3931,7 +5296,8 @@ mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) */ for (i = 0; i < port->mc_addr_nb; i++) { if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { - printf("multicast address already filtered by port\n"); + fprintf(stderr, + "multicast address already filtered by port\n"); return; } } @@ -3961,7 +5327,8 @@ mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) break; } if (i == port->mc_addr_nb) { - printf("multicast address not filtered by port %d\n", port_id); + fprintf(stderr, "multicast address not filtered by port %d\n", + port_id); return; } @@ -3984,7 +5351,7 @@ port_dcb_info_display(portid_t port_id) ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); if (ret) { - printf("\n Failed to get dcb infos on port %-2d\n", + fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n", port_id); return; } @@ -4027,34 +5394,34 @@ open_file(const char *file_path, uint32_t *size) *size = 0; if (fd == -1) { - printf("%s: Failed to open %s\n", __func__, file_path); + fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); return buf; } if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { close(fd); - printf("%s: File operations failed\n", __func__); + fprintf(stderr, "%s: File operations failed\n", __func__); return buf; } pkg_size = st_buf.st_size; if (pkg_size < 0) { close(fd); - printf("%s: File operations failed\n", __func__); + fprintf(stderr, "%s: File operations failed\n", __func__); return buf; } buf = (uint8_t *)malloc(pkg_size); if (!buf) { close(fd); - printf("%s: Failed to malloc memory\n", __func__); + fprintf(stderr, "%s: Failed to malloc memory\n", __func__); return buf; } ret = read(fd, buf, pkg_size); if (ret < 0) { close(fd); - printf("%s: File read operation failed\n", __func__); + fprintf(stderr, "%s: File read operation failed\n", __func__); close_file(buf); return NULL; } @@ -4073,13 +5440,13 @@ save_file(const char *file_path, uint8_t *buf, uint32_t size) FILE *fh = fopen(file_path, "wb"); if (fh == NULL) { - printf("%s: Failed to open %s\n", __func__, file_path); + fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path); return -1; } if (fwrite(buf, 1, size, fh) != size) { fclose(fh); - printf("%s: File write operation failed\n", __func__); + fprintf(stderr, "%s: File write operation failed\n", __func__); return -1; } @@ -4102,7 +5469,7 @@ close_file(uint8_t *buf) void port_queue_region_info_display(portid_t port_id, void *buf) { -#ifdef RTE_LIBRTE_I40E_PMD +#ifdef RTE_NET_I40E uint16_t i, j; struct rte_pmd_i40e_queue_regions *info = (struct rte_pmd_i40e_queue_regions *)buf; @@ -4147,19 +5514,20 @@ show_macs(portid_t port_id) { char buf[RTE_ETHER_ADDR_FMT_SIZE]; struct rte_eth_dev_info dev_info; - struct rte_ether_addr *addr; - uint32_t i, num_macs = 0; - struct rte_eth_dev *dev; + int32_t i, rc, num_macs = 0; - dev = &rte_eth_devices[port_id]; + if (eth_dev_info_get_print_err(port_id, &dev_info)) + return; - rte_eth_dev_info_get(port_id, &dev_info); + struct rte_ether_addr addr[dev_info.max_mac_addrs]; + rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); + if (rc < 0) + return; - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = &dev->data->mac_addrs[i]; + for (i = 0; i < rc; i++) { /* skip zero address */ - if (rte_is_zero_ether_addr(addr)) + if (rte_is_zero_ether_addr(&addr[i])) continue; num_macs++; @@ -4167,14 +5535,13 @@ show_macs(portid_t port_id) printf("Number of MAC address added: %d\n", num_macs); 
@@ -4147,19 +5514,20 @@ show_macs(portid_t port_id)
 {
 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
 	struct rte_eth_dev_info dev_info;
-	struct rte_ether_addr *addr;
-	uint32_t i, num_macs = 0;
-	struct rte_eth_dev *dev;
+	int32_t i, rc, num_macs = 0;
 
-	dev = &rte_eth_devices[port_id];
+	if (eth_dev_info_get_print_err(port_id, &dev_info))
+		return;
 
-	rte_eth_dev_info_get(port_id, &dev_info);
+	struct rte_ether_addr addr[dev_info.max_mac_addrs];
+	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
+	if (rc < 0)
+		return;
 
-	for (i = 0; i < dev_info.max_mac_addrs; i++) {
-		addr = &dev->data->mac_addrs[i];
+	for (i = 0; i < rc; i++) {
 
 		/* skip zero address */
-		if (rte_is_zero_ether_addr(addr))
+		if (rte_is_zero_ether_addr(&addr[i]))
 			continue;
 
 		num_macs++;
@@ -4167,14 +5535,13 @@ show_macs(portid_t port_id)
 
 	printf("Number of MAC address added: %d\n", num_macs);
 
-	for (i = 0; i < dev_info.max_mac_addrs; i++) {
-		addr = &dev->data->mac_addrs[i];
+	for (i = 0; i < rc; i++) {
 
 		/* skip zero address */
-		if (rte_is_zero_ether_addr(addr))
+		if (rte_is_zero_ether_addr(&addr[i]))
 			continue;
 
-		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
+		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
 		printf(" %s\n", buf);
 	}
 }
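The reworked show_macs() no longer dereferences the internal rte_eth_devices[] array: it sizes a VLA from dev_info.max_mac_addrs and lets rte_eth_macaddrs_get() copy the addresses out through the public ethdev API. A minimal version of that pattern is sketched below; it assumes an initialized EAL and a valid port_id, and calls rte_eth_dev_info_get() directly instead of testpmd's eth_dev_info_get_print_err() wrapper.

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Print the non-zero MAC addresses of a port via the public ethdev API
 * (no access to driver-internal data structures). */
static void print_port_macs(uint16_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	int i, rc;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	/* rte_eth_macaddrs_get() fills at most max_mac_addrs entries and
	 * returns the number of addresses copied, or a negative errno. */
	struct rte_ether_addr addr[dev_info.max_mac_addrs];

	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;
		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
		printf("port %u: %s\n", port_id, buf);
	}
}

Sizing the on-stack array from max_mac_addrs keeps the helper free of heap allocation while still bounding the copy to what the device can actually hold.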