#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
+#ifdef RTE_LIB_GRO
#include <rte_gro.h>
+#endif
#include <rte_hexdump.h>
#include "testpmd.h"
};
+/*
+ * Name -> RTE_ETH_RSS_* flag mapping for the RSS offload types that can be
+ * referenced by name in testpmd commands.  Terminated by a sentinel entry
+ * with a NULL name.
+ */
const struct rss_type_info rss_type_table[] = {
-	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
-		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
-		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
-		ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+	/* "all" aggregates every protocol-level RSS type listed below. */
+	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
	{ "none", 0 },
-	{ "eth", ETH_RSS_ETH },
-	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
-	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
-	{ "vlan", ETH_RSS_VLAN },
-	{ "s-vlan", ETH_RSS_S_VLAN },
-	{ "c-vlan", ETH_RSS_C_VLAN },
-	{ "ipv4", ETH_RSS_IPV4 },
-	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
-	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
-	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
-	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
-	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
-	{ "ipv6", ETH_RSS_IPV6 },
-	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
-	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
-	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
-	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
-	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
-	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
-	{ "ipv6-ex", ETH_RSS_IPV6_EX },
-	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
-	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
-	{ "port", ETH_RSS_PORT },
-	{ "vxlan", ETH_RSS_VXLAN },
-	{ "geneve", ETH_RSS_GENEVE },
-	{ "nvgre", ETH_RSS_NVGRE },
-	{ "ip", ETH_RSS_IP },
-	{ "udp", ETH_RSS_UDP },
-	{ "tcp", ETH_RSS_TCP },
-	{ "sctp", ETH_RSS_SCTP },
-	{ "tunnel", ETH_RSS_TUNNEL },
+	{ "eth", RTE_ETH_RSS_ETH },
+	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+	{ "vlan", RTE_ETH_RSS_VLAN },
+	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
+	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
+	{ "ipv4", RTE_ETH_RSS_IPV4 },
+	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", RTE_ETH_RSS_IPV6 },
+	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+	{ "port", RTE_ETH_RSS_PORT },
+	{ "vxlan", RTE_ETH_RSS_VXLAN },
+	{ "geneve", RTE_ETH_RSS_GENEVE },
+	{ "nvgre", RTE_ETH_RSS_NVGRE },
+	{ "ip", RTE_ETH_RSS_IP },
+	{ "udp", RTE_ETH_RSS_UDP },
+	{ "tcp", RTE_ETH_RSS_TCP },
+	{ "sctp", RTE_ETH_RSS_SCTP },
+	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
-	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
-	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
-	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
-	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
-	{ "esp", ETH_RSS_ESP },
-	{ "ah", ETH_RSS_AH },
-	{ "l2tpv3", ETH_RSS_L2TPV3 },
-	{ "pfcp", ETH_RSS_PFCP },
-	{ "pppoe", ETH_RSS_PPPOE },
-	{ "gtpu", ETH_RSS_GTPU },
-	{ "ecpri", ETH_RSS_ECPRI },
-	{ "mpls", ETH_RSS_MPLS },
-	{ "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
-	{ "l4-chksum", ETH_RSS_L4_CHKSUM },
+	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+	{ "esp", RTE_ETH_RSS_ESP },
+	{ "ah", RTE_ETH_RSS_AH },
+	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+	{ "pfcp", RTE_ETH_RSS_PFCP },
+	{ "pppoe", RTE_ETH_RSS_PPPOE },
+	{ "gtpu", RTE_ETH_RSS_GTPU },
+	{ "ecpri", RTE_ETH_RSS_ECPRI },
+	{ "mpls", RTE_ETH_RSS_MPLS },
+	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
+	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
	{ NULL, 0 },
};
printf("%s%s", name, buf);
}
+/*
+ * Show the user-selected extended statistics of @port_id together with
+ * their per-second rate since the previous invocation.  Does nothing if
+ * none of the requested xstats is supported by the port.
+ */
+static void
+nic_xstats_display_periodic(portid_t port_id)
+{
+	struct xstat_display_info *xstats_info;
+	uint64_t *prev_values, *curr_values;
+	uint64_t diff_value, value_rate;
+	struct timespec cur_time;
+	uint64_t *ids_supp;
+	size_t ids_supp_sz;
+	uint64_t diff_ns;
+	unsigned int i;
+	int rc;
+
+	xstats_info = &ports[port_id].xstats_info;
+
+	ids_supp_sz = xstats_info->ids_supp_sz;
+	if (ids_supp_sz == 0)
+		return;
+
+	printf("\n");
+
+	ids_supp = xstats_info->ids_supp;
+	prev_values = xstats_info->prev_values;
+	curr_values = xstats_info->curr_values;
+
+	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
+				      ids_supp_sz);
+	if (rc != (int)ids_supp_sz) {
+		fprintf(stderr,
+			"Failed to get values of %zu xstats for port %u - return code %d\n",
+			ids_supp_sz, port_id, rc);
+		return;
+	}
+
+	/* Time elapsed since the previous call, used for the rate column. */
+	diff_ns = 0;
+	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
+		uint64_t ns;
+
+		ns = cur_time.tv_sec * NS_PER_SEC;
+		ns += cur_time.tv_nsec;
+
+		if (xstats_info->prev_ns != 0)
+			diff_ns = ns - xstats_info->prev_ns;
+		xstats_info->prev_ns = ns;
+	}
+
+	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
+	for (i = 0; i < ids_supp_sz; i++) {
+		/* Counters may reset; clamp a negative delta to zero. */
+		diff_value = (curr_values[i] > prev_values[i]) ?
+			     (curr_values[i] - prev_values[i]) : 0;
+		prev_values[i] = curr_values[i];
+		value_rate = diff_ns > 0 ?
+				(double)diff_value / diff_ns * NS_PER_SEC : 0;
+
+		printf("  %-25s%12"PRIu64" %15"PRIu64"\n",
+		       xstats_display[i].name, curr_values[i], value_rate);
+	}
+}
+
void
nic_stats_display(portid_t port_id)
{
diff_ns;
uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
struct rte_eth_stats stats;
-
static const char *nic_stats_border = "########################";
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
print_valid_ports();
return;
}
- rte_eth_stats_get(port_id, &stats);
+ ret = rte_eth_stats_get(port_id, &stats);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s: Error: failed to get stats (port %u): %d",
+ __func__, port_id, ret);
+ return;
+ }
printf("\n %s NIC statistics for port %-2d %s\n",
nic_stats_border, port_id, nic_stats_border);
PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
mpps_tx, mbps_tx * 8);
+ if (xstats_display_num > 0)
+ nic_xstats_display_periodic(port_id);
+
printf(" %s############################%s\n",
nic_stats_border, nic_stats_border);
}
device_infos_display_speeds(uint32_t speed_capa)
{
printf("\n\tDevice speed capability:");
- if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+ if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
printf(" Autonegotiate (all speeds)");
- if (speed_capa & ETH_LINK_SPEED_FIXED)
+ if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
printf(" Disable autonegotiate (fixed speed) ");
- if (speed_capa & ETH_LINK_SPEED_10M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
printf(" 10 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_10M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M)
printf(" 10 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
printf(" 100 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M)
printf(" 100 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_1G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_1G)
printf(" 1 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_2_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
printf(" 2.5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_5G)
printf(" 5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_10G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10G)
printf(" 10 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_20G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_20G)
printf(" 20 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_25G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_25G)
printf(" 25 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_40G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_40G)
printf(" 40 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_50G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_50G)
printf(" 50 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_56G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_56G)
printf(" 56 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_100G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100G)
printf(" 100 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_200G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_200G)
printf(" 200 Gbps ");
}
rte_devargs_reset(&da);
}
+/* Print the name of every device capability bit set in @capabilities. */
+static void
+print_dev_capabilities(uint64_t capabilities)
+{
+	uint64_t single_capa;
+	int begin;
+	int end;
+	int bit;
+
+	if (capabilities == 0)
+		return;
+
+	/* Only walk the bit range bounded by the lowest and highest set bit. */
+	begin = __builtin_ctzll(capabilities);
+	end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities);
+
+	single_capa = 1ULL << begin;
+	for (bit = begin; bit < end; bit++) {
+		if (capabilities & single_capa)
+			printf(" %s",
+			       rte_eth_dev_capability_name(single_capa));
+		single_capa <<= 1;
+	}
+}
+
void
port_infos_display(portid_t port_id)
{
printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
- printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
- printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+ printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
("On") : ("Off"));
if (!rte_eth_dev_get_mtu(port_id, &mtu))
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (vlan_offload >= 0){
printf("VLAN offload: \n");
- if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
printf(" strip on, ");
else
printf(" strip off, ");
- if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
printf("filter on, ");
else
printf("filter off, ");
- if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
printf("extend on, ");
else
printf("extend off, ");
- if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
printf("qinq strip on\n");
else
printf("qinq strip off\n");
printf("Max segment number per MTU/TSO: %hu\n",
dev_info.tx_desc_lim.nb_mtu_seg_max);
+ printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa);
+ print_dev_capabilities(dev_info.dev_capa);
+ printf(" )\n");
/* Show switch info only if valid switch domain and port id is set */
if (dev_info.switch_info.domain_id !=
RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
dev_info.switch_info.domain_id);
printf("Switch Port Id: %u\n",
dev_info.switch_info.port_id);
+ if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
+ printf("Switch Rx domain: %u\n",
+ dev_info.switch_info.rx_domain);
}
}
return;
}
- char buf[len_eeprom];
einfo.offset = 0;
einfo.length = len_eeprom;
- einfo.data = buf;
+ einfo.data = calloc(1, len_eeprom);
+ if (!einfo.data) {
+ fprintf(stderr,
+ "Allocation of port %u eeprom data failed\n",
+ port_id);
+ return;
+ }
ret = rte_eth_dev_get_eeprom(port_id, &einfo);
if (ret != 0) {
fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
break;
}
+ free(einfo.data);
return;
}
rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
+ free(einfo.data);
}
void
return;
}
- char buf[minfo.eeprom_len];
einfo.offset = 0;
einfo.length = minfo.eeprom_len;
- einfo.data = buf;
+ einfo.data = calloc(1, minfo.eeprom_len);
+ if (!einfo.data) {
+ fprintf(stderr,
+ "Allocation of port %u eeprom data failed\n",
+ port_id);
+ return;
+ }
ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
if (ret != 0) {
ret);
break;
}
+ free(einfo.data);
return;
}
rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
+ free(einfo.data);
}
int
display_port_reg_value(port_id, reg_off, reg_v);
}
+/*
+ * Derive the L2 overhead (frame size minus MTU) for a port.  Prefer the
+ * value implied by the driver-reported max_rx_pktlen/max_mtu pair; fall
+ * back to Ethernet header plus CRC when max_mtu is not reported.
+ */
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+	uint32_t overhead_len;
+
+	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+		overhead_len = max_rx_pktlen - max_mtu;
+	else
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+	return overhead_len;
+}
+
+/*
+ * Check that @mtu is within the device's [min_mtu, max_mtu] range and
+ * that the resulting frame size fits in max_rx_pktlen.
+ * Returns 0 on success, -EINVAL (or the rte_eth_dev_info_get() error)
+ * otherwise, printing the reason to stderr.
+ */
+static int
+eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
+{
+	struct rte_eth_dev_info dev_info;
+	uint32_t overhead_len;
+	uint32_t frame_size;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;
+
+	if (mtu < dev_info.min_mtu) {
+		fprintf(stderr,
+			"MTU (%u) < device min MTU (%u) for port_id %u\n",
+			mtu, dev_info.min_mtu, port_id);
+		return -EINVAL;
+	}
+	if (mtu > dev_info.max_mtu) {
+		fprintf(stderr,
+			"MTU (%u) > device max MTU (%u) for port_id %u\n",
+			mtu, dev_info.max_mtu, port_id);
+		return -EINVAL;
+	}
+
+	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+			dev_info.max_mtu);
+	frame_size = mtu + overhead_len;
+	if (frame_size > dev_info.max_rx_pktlen) {
+		fprintf(stderr,
+			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
+			frame_size, dev_info.max_rx_pktlen, port_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Set the MTU of @port_id.  The value is validated first; the device is
+ * only programmed when the port needs no reconfiguration, otherwise the
+ * MTU is just recorded in the port config for the next (re)configure.
+ */
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
+	struct rte_port *port = &ports[port_id];
	int diag;
-	struct rte_port *rte_port = &ports[port_id];
-	struct rte_eth_dev_info dev_info;
-	uint16_t eth_overhead;
-	int ret;
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
-	ret = eth_dev_info_get_print_err(port_id, &dev_info);
-	if (ret != 0)
+	diag = eth_dev_validate_mtu(port_id, mtu);
+	if (diag != 0)
		return;
-	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
-		fprintf(stderr,
-			"Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
-			mtu, dev_info.min_mtu, dev_info.max_mtu);
-		return;
-	}
-	diag = rte_eth_dev_set_mtu(port_id, mtu);
-	if (diag)
-		fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
-	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		/*
-		 * Ether overhead in driver is equal to the difference of
-		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
-		 * device supports jumbo frame.
-		 */
-		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
-		if (mtu > RTE_ETHER_MTU) {
-			rte_port->dev_conf.rxmode.offloads |=
-						DEV_RX_OFFLOAD_JUMBO_FRAME;
-			rte_port->dev_conf.rxmode.max_rx_pkt_len =
-						mtu + eth_overhead;
-		} else
-			rte_port->dev_conf.rxmode.offloads &=
-						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+	if (port->need_reconfig == 0) {
+		diag = rte_eth_dev_set_mtu(port_id, mtu);
+		if (diag != 0) {
+			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
+			return;
+		}
	}
+
+	port->dev_conf.rxmode.mtu = mtu;
}
/* Generic flow management functions. */
case RTE_FLOW_ITEM_TYPE_VXLAN:
type = "vxlan";
break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ type = "gre";
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ type = "nvgre";
+ break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ type = "geneve";
+ break;
}
return type;
if (!strcmp(ops->type, "vxlan"))
type = RTE_FLOW_ITEM_TYPE_VXLAN;
+ else if (!strcmp(ops->type, "gre"))
+ type = RTE_FLOW_ITEM_TYPE_GRE;
+ else if (!strcmp(ops->type, "nvgre"))
+ type = RTE_FLOW_ITEM_TYPE_NVGRE;
+ else if (!strcmp(ops->type, "geneve"))
+ type = RTE_FLOW_ITEM_TYPE_GENEVE;
else {
fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
ops->type);
error->cause), buf) : "",
error->message ? error->message : "(no stated reason)",
rte_strerror(err));
+
+ switch (error->type) {
+ case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
+ fprintf(stderr, "The status suggests the use of \"transfer\" "
+ "as the possible cause of the failure. Make "
+ "sure that the flow in question and its "
+ "indirect components (if any) are managed "
+ "via \"transfer\" proxy port. Use command "
+ "\"show port (port_id) flow transfer proxy\" "
+ "to figure out the proxy port ID\n");
+ break;
+ default:
+ break;
+ }
+
return -err;
}
return 0;
}
-/** Create indirect action */
+/*
+ * Allocate a flow template list entry with ID @id (or the first free ID
+ * when @id is UINT32_MAX) and insert it into @list, which is kept sorted
+ * by descending ID.  On success *@template points to the new entry.
+ * Returns 0, -ENOMEM on allocation/ID exhaustion, -EINVAL if @id is taken.
+ */
+static int
+template_alloc(uint32_t id, struct port_template **template,
+	       struct port_template **list)
+{
+	struct port_template *lst = *list;
+	struct port_template **ppt;
+	struct port_template *pt = NULL;
+
+	*template = NULL;
+	if (id == UINT32_MAX) {
+		/* taking first available ID */
+		if (lst) {
+			if (lst->id == UINT32_MAX - 1) {
+				printf("Highest template ID is already"
+				" assigned, delete it first\n");
+				return -ENOMEM;
+			}
+			id = lst->id + 1;
+		} else {
+			id = 0;
+		}
+	}
+	pt = calloc(1, sizeof(*pt));
+	if (!pt) {
+		printf("Allocation of port template failed\n");
+		return -ENOMEM;
+	}
+	/* Find the insertion point; the list head holds the highest ID. */
+	ppt = list;
+	while (*ppt && (*ppt)->id > id)
+		ppt = &(*ppt)->next;
+	if (*ppt && (*ppt)->id == id) {
+		printf("Template #%u is already assigned,"
+			" delete it first\n", id);
+		free(pt);
+		return -EINVAL;
+	}
+	pt->next = *ppt;
+	pt->id = id;
+	*ppt = pt;
+	*template = pt;
+	return 0;
+}
+
+/*
+ * Allocate a template table list entry with ID @id (or the first free ID
+ * when @id is UINT32_MAX) and insert it into @list, kept sorted by
+ * descending ID.  Same allocation scheme as template_alloc().
+ * Returns 0, -ENOMEM on allocation/ID exhaustion, -EINVAL if @id is taken.
+ */
+static int
+table_alloc(uint32_t id, struct port_table **table,
+	    struct port_table **list)
+{
+	struct port_table *lst = *list;
+	struct port_table **ppt;
+	struct port_table *pt = NULL;
+
+	*table = NULL;
+	if (id == UINT32_MAX) {
+		/* taking first available ID */
+		if (lst) {
+			if (lst->id == UINT32_MAX - 1) {
+				printf("Highest table ID is already"
+				" assigned, delete it first\n");
+				return -ENOMEM;
+			}
+			id = lst->id + 1;
+		} else {
+			id = 0;
+		}
+	}
+	pt = calloc(1, sizeof(*pt));
+	if (!pt) {
+		printf("Allocation of table failed\n");
+		return -ENOMEM;
+	}
+	/* Find the insertion point; the list head holds the highest ID. */
+	ppt = list;
+	while (*ppt && (*ppt)->id > id)
+		ppt = &(*ppt)->next;
+	if (*ppt && (*ppt)->id == id) {
+		printf("Table #%u is already assigned,"
+			" delete it first\n", id);
+		free(pt);
+		return -EINVAL;
+	}
+	pt->next = *ppt;
+	pt->id = id;
+	*ppt = pt;
+	*table = pt;
+	return 0;
+}
+
+/** Get info about flow management resources. */
int
-port_action_handle_create(portid_t port_id, uint32_t id,
-			  const struct rte_flow_indir_action_conf *conf,
-			  const struct rte_flow_action *action)
+port_flow_get_info(portid_t port_id)
{
-	struct port_indirect_action *pia;
-	int ret;
+	struct rte_flow_port_info port_info;
+	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;
-	struct rte_port *port;
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x99, sizeof(error));
+	memset(&port_info, 0, sizeof(port_info));
+	memset(&queue_info, 0, sizeof(queue_info));
+	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
+		return port_flow_complain(&error);
+	/* Report the flow engine limits advertised by the PMD. */
+	printf("Flow engine resources on port %u:\n"
+	       "Number of queues: %d\n"
+	       "Size of queues: %d\n"
+	       "Number of counters: %d\n"
+	       "Number of aging objects: %d\n"
+	       "Number of meter actions: %d\n",
+	       port_id, port_info.max_nb_queues,
+	       queue_info.max_size,
+	       port_info.max_nb_counters,
+	       port_info.max_nb_aging_objects,
+	       port_info.max_nb_meters);
+	return 0;
+}
- ret = action_alloc(port_id, id, &pia);
- if (ret)
- return ret;
-
- port = &ports[port_id];
-
- if (conf->transfer)
- port_id = port->flow_transfer_proxy;
+/** Configure flow management resources. */
+int
+port_flow_configure(portid_t port_id,
+	const struct rte_flow_port_attr *port_attr,
+	uint16_t nb_queue,
+	const struct rte_flow_queue_attr *queue_attr)
+{
+	struct rte_port *port;
+	struct rte_flow_error error;
+	const struct rte_flow_queue_attr *attr_list[nb_queue];
+	int std_queue;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	port->queue_nb = nb_queue;
+	port->queue_sz = queue_attr->size;
+	/* Every flow queue gets the same attributes. */
+	for (std_queue = 0; std_queue < nb_queue; std_queue++)
+		attr_list[std_queue] = queue_attr;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x66, sizeof(error));
+	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
+		return port_flow_complain(&error);
+	printf("Configure flows on port %u: "
+	       "number of queues %d with %d elements\n",
+	       port_id, nb_queue, queue_attr->size);
+	return 0;
+}
+
+/** Create indirect action */
+int
+port_action_handle_create(portid_t port_id, uint32_t id,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action)
+{
+ struct port_indirect_action *pia;
+ int ret;
+ struct rte_flow_error error;
+ ret = action_alloc(port_id, id, &pia);
+ if (ret)
+ return ret;
if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
struct rte_flow_action_age *age =
(struct rte_flow_action_age *)(uintptr_t)(action->conf);
return port_flow_complain(&error);
}
pia->type = action->type;
- pia->transfer = conf->transfer;
printf("Indirect action #%u created\n", pia->id);
return 0;
}
for (i = 0; i != n; ++i) {
struct rte_flow_error error;
struct port_indirect_action *pia = *tmp;
- portid_t port_id_eff = port_id;
if (actions[i] != pia->id)
continue;
-
- if (pia->transfer)
- port_id_eff = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
- port_id_eff == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
/*
* Poisoning to make sure PMDs update it in case
* of error.
memset(&error, 0x33, sizeof(error));
if (pia->handle && rte_flow_action_handle_destroy(
- port_id_eff, pia->handle, &error)) {
+ port_id, pia->handle, &error)) {
ret = port_flow_complain(&error);
continue;
}
return ret;
}
+/*
+ * Destroy every indirect action on @port_id.  Entries whose destruction
+ * fails are kept in the list and the failure is reported; the last
+ * complain() result is returned.
+ */
+int
+port_action_handle_flush(portid_t port_id)
+{
+	struct rte_port *port;
+	struct port_indirect_action **tmp;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	tmp = &port->actions_list;
+	while (*tmp != NULL) {
+		struct rte_flow_error error;
+		struct port_indirect_action *pia = *tmp;
+
+		/* Poisoning to make sure PMDs update it in case of error. */
+		memset(&error, 0x44, sizeof(error));
+		if (pia->handle != NULL &&
+		    rte_flow_action_handle_destroy
+					(port_id, pia->handle, &error) != 0) {
+			printf("Indirect action #%u not destroyed\n", pia->id);
+			ret = port_flow_complain(&error);
+			tmp = &pia->next;
+		} else {
+			*tmp = pia->next;
+			free(pia);
+		}
+	}
+	return ret;
+}
/** Get indirect action by port + id */
struct rte_flow_action_handle *
struct rte_flow_error error;
struct rte_flow_action_handle *action_handle;
struct port_indirect_action *pia;
- struct rte_port *port;
const void *update;
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- port = &ports[port_id];
-
action_handle = port_action_handle_get_by_id(port_id, id);
if (!action_handle)
return -EINVAL;
update = action;
break;
}
-
- if (pia->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
if (rte_flow_action_handle_update(port_id, action_handle, update,
&error)) {
return port_flow_complain(&error);
struct rte_flow_query_age age;
struct rte_flow_action_conntrack ct;
} query;
- portid_t port_id_eff = port_id;
- struct rte_port *port;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- port = &ports[port_id];
pia = action_get_by_id(port_id, id);
if (!pia)
id, pia->type, port_id);
return -ENOTSUP;
}
-
- if (pia->transfer)
- port_id_eff = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
- port_id_eff == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x55, sizeof(error));
memset(&query, 0, sizeof(query));
- if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query,
- &error))
+ if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
return port_flow_complain(&error);
switch (pia->type) {
case RTE_FLOW_ACTION_TYPE_AGE:
{
struct rte_flow_error error;
struct port_flow_tunnel *pft = NULL;
- struct rte_port *port;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- port = &ports[port_id];
-
- if (attr->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
+ int ret;
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x11, sizeof(error));
if (pft->actions)
actions = pft->actions;
}
- if (rte_flow_validate(port_id, attr, pattern, actions, &error))
- return port_flow_complain(&error);
+ ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
if (tunnel_ops->enabled)
port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
+ if (ret)
+ return port_flow_complain(&error);
printf("Flow rule validated\n");
return 0;
}
return NULL;
}
-/** Create flow rule. */
+/** Create pattern template */
int
-port_flow_create(portid_t port_id,
-		 const struct rte_flow_attr *attr,
-		 const struct rte_flow_item *pattern,
-		 const struct rte_flow_action *actions,
-		 const struct tunnel_ops *tunnel_ops)
+port_flow_pattern_template_create(portid_t port_id, uint32_t id,
+				  const struct rte_flow_pattern_template_attr *attr,
+				  const struct rte_flow_item *pattern)
{
-	struct rte_flow *flow;
	struct rte_port *port;
-	struct port_flow *pf;
-	uint32_t id = 0;
+	struct port_template *pit;
+	int ret;
	struct rte_flow_error error;
-	struct port_flow_tunnel *pft = NULL;
-	struct rte_flow_action_age *age = age_action_get(actions);
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
-
	port = &ports[port_id];
+	ret = template_alloc(id, &pit, &port->pattern_templ_list);
+	if (ret)
+		return ret;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x22, sizeof(error));
+	pit->template.pattern_template = rte_flow_pattern_template_create(port_id,
+			attr, pattern, &error);
+	if (!pit->template.pattern_template) {
+		/* Roll back the list entry when the PMD rejects the template. */
+		uint32_t destroy_id = pit->id;
+		port_flow_pattern_template_destroy(port_id, 1, &destroy_id);
+		return port_flow_complain(&error);
+	}
+	printf("Pattern template #%u created\n", pit->id);
+	return 0;
+}
+
+/** Destroy pattern template */
+int
+port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
+				   const uint32_t *template)
+{
+	struct rte_port *port;
+	struct port_template **tmp;
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	/* Walk the list, removing every entry whose ID is in @template. */
+	tmp = &port->pattern_templ_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_template *pit = *tmp;
+
+			if (template[i] != pit->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMDs update it in case
+			 * of error.
+			 */
+			memset(&error, 0x33, sizeof(error));
+
+			if (pit->template.pattern_template &&
+			    rte_flow_pattern_template_destroy(port_id,
+							   pit->template.pattern_template,
+							   &error)) {
+				ret = port_flow_complain(&error);
+				continue;
+			}
+			*tmp = pit->next;
+			printf("Pattern template #%u destroyed\n", pit->id);
+			free(pit);
+			break;
+		}
+		if (i == n)
+			tmp = &(*tmp)->next;
+		++c;
+	}
+	return ret;
+}
+
+/** Create actions template */
+int
+port_flow_actions_template_create(portid_t port_id, uint32_t id,
+				  const struct rte_flow_actions_template_attr *attr,
+				  const struct rte_flow_action *actions,
+				  const struct rte_flow_action *masks)
+{
+	struct rte_port *port;
+	struct port_template *pat;
+	int ret;
+	struct rte_flow_error error;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	ret = template_alloc(id, &pat, &port->actions_templ_list);
+	if (ret)
+		return ret;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x22, sizeof(error));
+	pat->template.actions_template = rte_flow_actions_template_create(port_id,
+			attr, actions, masks, &error);
+	if (!pat->template.actions_template) {
+		/* Roll back the list entry when the PMD rejects the template. */
+		uint32_t destroy_id = pat->id;
+		port_flow_actions_template_destroy(port_id, 1, &destroy_id);
+		return port_flow_complain(&error);
+	}
+	printf("Actions template #%u created\n", pat->id);
+	return 0;
+}
+
+/** Destroy actions template */
+int
+port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
+				   const uint32_t *template)
+{
+	struct rte_port *port;
+	struct port_template **tmp;
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	/* Walk the list, removing every entry whose ID is in @template. */
+	tmp = &port->actions_templ_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_template *pat = *tmp;
+
+			if (template[i] != pat->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMDs update it in case
+			 * of error.
+			 */
+			memset(&error, 0x33, sizeof(error));
+
+			if (pat->template.actions_template &&
+			    rte_flow_actions_template_destroy(port_id,
+					pat->template.actions_template, &error)) {
+				ret = port_flow_complain(&error);
+				continue;
+			}
+			*tmp = pat->next;
+			printf("Actions template #%u destroyed\n", pat->id);
+			free(pat);
+			break;
+		}
+		if (i == n)
+			tmp = &(*tmp)->next;
+		++c;
+	}
+	return ret;
+}
+
+/** Create table */
+int
+port_flow_template_table_create(portid_t port_id, uint32_t id,
+		const struct rte_flow_template_table_attr *table_attr,
+		uint32_t nb_pattern_templates, uint32_t *pattern_templates,
+		uint32_t nb_actions_templates, uint32_t *actions_templates)
+{
+	struct rte_port *port;
+	struct port_table *pt;
+	struct port_template *temp = NULL;
+	int ret;
+	uint32_t i;
+	struct rte_flow_error error;
+	struct rte_flow_pattern_template
+			*flow_pattern_templates[nb_pattern_templates];
+	struct rte_flow_actions_template
+			*flow_actions_templates[nb_actions_templates];
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	/* Resolve each pattern template ID to its PMD handle. */
+	for (i = 0; i < nb_pattern_templates; ++i) {
+		bool found = false;
+		temp = port->pattern_templ_list;
+		while (temp) {
+			if (pattern_templates[i] == temp->id) {
+				flow_pattern_templates[i] =
+					temp->template.pattern_template;
+				found = true;
+				break;
+			}
+			temp = temp->next;
+		}
+		if (!found) {
+			printf("Pattern template #%u is invalid\n",
+			       pattern_templates[i]);
+			return -EINVAL;
+		}
+	}
+	/* Resolve each actions template ID to its PMD handle. */
+	for (i = 0; i < nb_actions_templates; ++i) {
+		bool found = false;
+		temp = port->actions_templ_list;
+		while (temp) {
+			if (actions_templates[i] == temp->id) {
+				flow_actions_templates[i] =
+					temp->template.actions_template;
+				found = true;
+				break;
+			}
+			temp = temp->next;
+		}
+		if (!found) {
+			printf("Actions template #%u is invalid\n",
+			       actions_templates[i]);
+			return -EINVAL;
+		}
+	}
+	ret = table_alloc(id, &pt, &port->table_list);
+	if (ret)
+		return ret;
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x22, sizeof(error));
+	pt->table = rte_flow_template_table_create(port_id, table_attr,
+		      flow_pattern_templates, nb_pattern_templates,
+		      flow_actions_templates, nb_actions_templates,
+		      &error);
+
+	if (!pt->table) {
+		/* Roll back the list entry when the PMD rejects the table. */
+		uint32_t destroy_id = pt->id;
+		port_flow_template_table_destroy(port_id, 1, &destroy_id);
+		return port_flow_complain(&error);
+	}
+	pt->nb_pattern_templates = nb_pattern_templates;
+	pt->nb_actions_templates = nb_actions_templates;
+	printf("Template table #%u created\n", pt->id);
+	return 0;
+}
+
+/** Destroy table */
+int
+port_flow_template_table_destroy(portid_t port_id,
+				 uint32_t n, const uint32_t *table)
+{
+	struct rte_port *port;
+	struct port_table **tmp;
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	/* Walk the list, removing every entry whose ID is in @table. */
+	tmp = &port->table_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_table *pt = *tmp;
+
+			if (table[i] != pt->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMDs update it in case
+			 * of error.
+			 */
+			memset(&error, 0x33, sizeof(error));
-	if (attr->transfer)
-		port_id = port->flow_transfer_proxy;
-
-	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
-	    port_id == (portid_t)RTE_PORT_ALL)
-		return -EINVAL;
-
+			if (pt->table &&
+			    rte_flow_template_table_destroy(port_id,
+							    pt->table,
+							    &error)) {
+				ret = port_flow_complain(&error);
+				continue;
+			}
+			*tmp = pt->next;
+			printf("Template table #%u destroyed\n", pt->id);
+			free(pt);
+			break;
+		}
+		if (i == n)
+			tmp = &(*tmp)->next;
+		++c;
+	}
+	return ret;
+}
+
+/**
+ * Enqueue create flow rule operation.
+ *
+ * Validates the port, queue, table and template indices, then enqueues an
+ * asynchronous rule creation on @queue_id and links the new rule at the
+ * head of port->flow_list (IDs are therefore monotonically increasing).
+ */
+int
+port_queue_flow_create(portid_t port_id, queueid_t queue_id,
+		       bool postpone, uint32_t table_id,
+		       uint32_t pattern_idx, uint32_t actions_idx,
+		       const struct rte_flow_item *pattern,
+		       const struct rte_flow_action *actions)
+{
+	struct rte_flow_op_attr op_attr = { .postpone = postpone };
+	struct rte_flow *flow;
+	struct rte_port *port;
+	struct port_flow *pf;
+	struct port_table *pt;
+	uint32_t id = 0;
+	bool found;
+	struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
+	struct rte_flow_action_age *age = age_action_get(actions);
+
+	/*
+	 * Validate the port before dereferencing ports[]; mirrors the check
+	 * done by port_queue_flow_destroy() and the other queue helpers.
+	 */
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	if (port->flow_list) {
+		if (port->flow_list->id == UINT32_MAX) {
+			printf("Highest rule ID is already assigned,"
+			       " delete it first");
+			return -ENOMEM;
+		}
+		id = port->flow_list->id + 1;
+	}
+
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	/* Resolve the template table this rule is created from. */
+	found = false;
+	pt = port->table_list;
+	while (pt) {
+		if (table_id == pt->id) {
+			found = true;
+			break;
+		}
+		pt = pt->next;
+	}
+	if (!found) {
+		printf("Table #%u is invalid\n", table_id);
+		return -EINVAL;
+	}
+
+	if (pattern_idx >= pt->nb_pattern_templates) {
+		printf("Pattern template index #%u is invalid,"
+		       " %u templates present in the table\n",
+		       pattern_idx, pt->nb_pattern_templates);
+		return -EINVAL;
+	}
+	if (actions_idx >= pt->nb_actions_templates) {
+		printf("Actions template index #%u is invalid,"
+		       " %u templates present in the table\n",
+		       actions_idx, pt->nb_actions_templates);
+		return -EINVAL;
+	}
+
+	pf = port_flow_new(NULL, pattern, actions, &error);
+	if (!pf)
+		return port_flow_complain(&error);
+	if (age) {
+		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
+		age->context = &pf->age_type;
+	}
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x11, sizeof(error));
+	flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table,
+		pattern, pattern_idx, actions, actions_idx, NULL, &error);
+	if (!flow) {
+		/*
+		 * pf was never linked into port->flow_list and pf->id was
+		 * never assigned, so free it directly instead of enqueueing
+		 * a destroy operation for an unrelated (garbage) rule ID.
+		 */
+		free(pf);
+		return port_flow_complain(&error);
+	}
+
+	pf->next = port->flow_list;
+	pf->id = id;
+	pf->flow = flow;
+	port->flow_list = pf;
+	printf("Flow rule #%u creation enqueued\n", pf->id);
+	return 0;
+}
+
+/**
+ * Enqueue number of destroy flow rules operations.
+ *
+ * For each of the @n rule IDs, enqueues an asynchronous destroy on
+ * @queue_id, unlinks the rule from port->flow_list and frees it.  A rule
+ * whose enqueue fails stays in the list and the complaint is recorded.
+ */
+int
+port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
+			bool postpone, uint32_t n, const uint32_t *rule)
+{
+	struct rte_flow_op_attr op_attr = { .postpone = postpone };
+	struct rte_port *port;
+	struct port_flow **tmp;
+	/* Visited-entry counter; not otherwise used in this function. */
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	/* Pointer-to-link iteration so matches can be unlinked in place. */
+	tmp = &port->flow_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_flow *pf = *tmp;
+
+			if (rule[i] != pf->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMD
+			 * update it in case of error.
+			 */
+			memset(&error, 0x33, sizeof(error));
+			if (rte_flow_async_destroy(port_id, queue_id, &op_attr,
+						   pf->flow, NULL, &error)) {
+				ret = port_flow_complain(&error);
+				continue;
+			}
+			printf("Flow rule #%u destruction enqueued\n", pf->id);
+			*tmp = pf->next;
+			free(pf);
+			break;
+		}
+		/* Advance only when the current entry was not unlinked. */
+		if (i == n)
+			tmp = &(*tmp)->next;
+		++c;
+	}
+	return ret;
+}
+
+/**
+ * Enqueue indirect action create operation.
+ *
+ * Allocates a port_indirect_action entry and enqueues the asynchronous
+ * handle creation on @queue_id.  On PMD failure the freshly allocated
+ * entry is removed again via port_queue_action_handle_destroy().
+ */
+int
+port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
+				bool postpone, uint32_t id,
+				const struct rte_flow_indir_action_conf *conf,
+				const struct rte_flow_action *action)
+{
+	const struct rte_flow_op_attr attr = { .postpone = postpone};
+	struct rte_port *port;
+	struct port_indirect_action *pia;
+	int ret;
+	struct rte_flow_error error;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+	/*
+	 * Validate the queue before action_alloc() so an invalid queue does
+	 * not leave a stale, handle-less entry in port->actions_list.
+	 */
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	ret = action_alloc(port_id, id, &pia);
+	if (ret)
+		return ret;
+
+	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
+		struct rte_flow_action_age *age =
+			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
+
+		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
+		age->context = &pia->age_type;
+	}
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x88, sizeof(error));
+	pia->handle = rte_flow_async_action_handle_create(port_id, queue_id,
+					&attr, conf, action, NULL, &error);
+	if (!pia->handle) {
+		uint32_t destroy_id = pia->id;
+		port_queue_action_handle_destroy(port_id, queue_id,
+						 postpone, 1, &destroy_id);
+		return port_flow_complain(&error);
+	}
+	pia->type = action->type;
+	printf("Indirect action #%u creation queued\n", pia->id);
+	return 0;
+}
+
+/**
+ * Enqueue indirect action destroy operation.
+ *
+ * For each of the @n action IDs, enqueues an asynchronous handle destroy
+ * on @queue_id, then unlinks and frees the port_indirect_action entry.
+ * Entries whose PMD destroy fails are kept and the complaint recorded.
+ */
+int
+port_queue_action_handle_destroy(portid_t port_id,
+				 uint32_t queue_id, bool postpone,
+				 uint32_t n, const uint32_t *actions)
+{
+	const struct rte_flow_op_attr attr = { .postpone = postpone};
+	struct rte_port *port;
+	struct port_indirect_action **tmp;
+	/* Visited-entry counter; not otherwise used in this function. */
+	uint32_t c = 0;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	/* Pointer-to-link iteration so matches can be unlinked in place. */
+	tmp = &port->actions_list;
+	while (*tmp) {
+		uint32_t i;
+
+		for (i = 0; i != n; ++i) {
+			struct rte_flow_error error;
+			struct port_indirect_action *pia = *tmp;
+
+			if (actions[i] != pia->id)
+				continue;
+			/*
+			 * Poisoning to make sure PMDs update it in case
+			 * of error.
+			 */
+			memset(&error, 0x99, sizeof(error));
+
+			/* A NULL handle (creation failed) is unlinked as-is. */
+			if (pia->handle &&
+			    rte_flow_async_action_handle_destroy(port_id,
+				queue_id, &attr, pia->handle, NULL, &error)) {
+				ret = port_flow_complain(&error);
+				continue;
+			}
+			*tmp = pia->next;
+			printf("Indirect action #%u destruction queued\n",
+			       pia->id);
+			free(pia);
+			break;
+		}
+		/* Advance only when the current entry was not unlinked. */
+		if (i == n)
+			tmp = &(*tmp)->next;
+		++c;
+	}
+	return ret;
+}
+
+/**
+ * Enqueue indirect action update operation.
+ *
+ * Looks up the action handle by @id and enqueues an asynchronous update
+ * with the new @action configuration on @queue_id.
+ */
+int
+port_queue_action_handle_update(portid_t port_id,
+				uint32_t queue_id, bool postpone, uint32_t id,
+				const struct rte_flow_action *action)
+{
+	const struct rte_flow_op_attr attr = { .postpone = postpone};
+	struct rte_port *port;
+	struct rte_flow_error error;
+	struct rte_flow_action_handle *action_handle;
+
+	action_handle = port_action_handle_get_by_id(port_id, id);
+	if (!action_handle)
+		return -EINVAL;
+
+	port = &ports[port_id];
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Poisoning to make sure PMDs update it in case of error; without
+	 * this, a PMD that fails without filling the structure would make
+	 * port_flow_complain() read indeterminate data.
+	 */
+	memset(&error, 0xAA, sizeof(error));
+	if (rte_flow_async_action_handle_update(port_id, queue_id, &attr,
+				    action_handle, action, NULL, &error)) {
+		return port_flow_complain(&error);
+	}
+	printf("Indirect action #%u update queued\n", id);
+	return 0;
+}
+
+/**
+ * Push all the queue operations in the queue to the NIC.
+ *
+ * Returns rte_flow_push()'s result (0 on success), or -EINVAL on a bad
+ * port/queue or push failure.
+ */
+int
+port_queue_flow_push(portid_t port_id, queueid_t queue_id)
+{
+	struct rte_port *port;
+	struct rte_flow_error error;
+	int ret = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x55, sizeof(error));
+	ret = rte_flow_push(port_id, queue_id, &error);
+	if (ret < 0) {
+		printf("Failed to push operations in the queue\n");
+		return -EINVAL;
+	}
+	printf("Queue #%u operations pushed\n", queue_id);
+	return ret;
+}
+
+/**
+ * Pull queue operation results from the queue.
+ *
+ * Drains up to port->queue_sz completed operation results from
+ * @queue_id and prints a success/failure summary.  Returns the number
+ * of results pulled, or a negative errno on failure.
+ */
+int
+port_queue_flow_pull(portid_t port_id, queueid_t queue_id)
+{
+	struct rte_port *port;
+	struct rte_flow_op_result *res;
+	struct rte_flow_error error;
+	int ret = 0;
+	int success = 0;
+	int i;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+	    port_id == (portid_t)RTE_PORT_ALL)
+		return -EINVAL;
+	port = &ports[port_id];
+
+	if (queue_id >= port->queue_nb) {
+		printf("Queue #%u is invalid\n", queue_id);
+		return -EINVAL;
+	}
+
+	/* One result slot per possible in-flight operation in the queue. */
+	res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
+	if (!res) {
+		printf("Failed to allocate memory for pulled results\n");
+		return -ENOMEM;
+	}
+
+	/* Poisoning to make sure PMDs update it in case of error. */
+	memset(&error, 0x66, sizeof(error));
+	ret = rte_flow_pull(port_id, queue_id, res,
+			    port->queue_sz, &error);
+	if (ret < 0) {
+		printf("Failed to pull operation results\n");
+		free(res);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ret; i++) {
+		if (res[i].status == RTE_FLOW_OP_SUCCESS)
+			success++;
+	}
+	/* ret/success are signed ints; use %d to match the argument types. */
+	printf("Queue #%u pulled %d operations (%d failed, %d succeeded)\n",
+	       queue_id, ret, ret - success, success);
+	free(res);
+	return ret;
+}
+
+/** Create flow rule. */
+int
+port_flow_create(portid_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ const struct tunnel_ops *tunnel_ops)
+{
+ struct rte_flow *flow;
+ struct rte_port *port;
+ struct port_flow *pf;
+ uint32_t id = 0;
+ struct rte_flow_error error;
+ struct port_flow_tunnel *pft = NULL;
+ struct rte_flow_action_age *age = age_action_get(actions);
+
+ port = &ports[port_id];
if (port->flow_list) {
if (port->flow_list->id == UINT32_MAX) {
fprintf(stderr,
uint32_t i;
for (i = 0; i != n; ++i) {
- portid_t port_id_eff = port_id;
struct rte_flow_error error;
struct port_flow *pf = *tmp;
* of error.
*/
memset(&error, 0x33, sizeof(error));
-
- if (pf->rule.attr->transfer)
- port_id_eff = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
- port_id_eff == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- if (rte_flow_destroy(port_id_eff, pf->flow, &error)) {
+ if (rte_flow_destroy(port_id, pf->flow, &error)) {
ret = port_flow_complain(&error);
continue;
}
fprintf(stderr, "Flow rule #%u not found\n", rule);
return -ENOENT;
}
-
- if (pf->rule.attr->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
&name, sizeof(name),
(void *)(uintptr_t)action->type, &error);
nb_fwd_lcores, nb_fwd_ports);
RTE_ETH_FOREACH_DEV(pid) {
- struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
- struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
+ struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf;
+ struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf;
uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
struct rte_eth_rxq_info rx_qinfo;
printf(" RX threshold registers: pthresh=%d hthresh=%d "
" wthresh=%d\n",
pthresh_tmp, hthresh_tmp, wthresh_tmp);
- printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
+ printf(" RX Offloads=0x%"PRIx64, offloads_tmp);
+ if (rx_conf->share_group > 0)
+ printf(" share_group=%u share_qid=%u",
+ rx_conf->share_group,
+ rx_conf->share_qid);
+ printf("\n");
}
/* per tx queue config only for first queue to be less verbose */
}
for (i = 0; i < nb_entries; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
printf("RSS RETA configuration: hash index=%u, queue=%u\n",
}
/*
- * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionally, the RSS hash
* key of the port.
*/
void
}
printf("RSS functions:\n ");
for (i = 0; rss_type_table[i].str; i++) {
- if (rss_hf & rss_type_table[i].rss_type)
+ if (rss_type_table[i].rss_type == 0)
+ continue;
+ if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type)
printf("%s ", rss_type_table[i].str);
}
printf("\n");
unsigned int i;
rss_conf.rss_key = NULL;
- rss_conf.rss_key_len = hash_key_len;
+ rss_conf.rss_key_len = 0;
rss_conf.rss_hf = 0;
for (i = 0; rss_type_table[i].str; i++) {
if (!strcmp(rss_type_table[i].str, rss_type))
diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
if (diag == 0) {
rss_conf.rss_key = hash_key;
+ rss_conf.rss_key_len = hash_key_len;
diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
if (diag == 0)
}
}
+/*
+ * Check whether a shared rxq is also scheduled on any lcore after src_lc.
+ * Returns true (and prints a diagnostic) when the same switch domain +
+ * share group + share queue appears on a later core.
+ */
+static bool
+fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
+			   portid_t src_port, queueid_t src_rxq,
+			   uint32_t share_group, queueid_t share_rxq)
+{
+	streamid_t sm_id;
+	streamid_t nb_fs_per_lcore;
+	lcoreid_t  nb_fc;
+	lcoreid_t  lc_id;
+	struct fwd_stream *fs;
+	struct rte_port *port;
+	struct rte_eth_dev_info *dev_info;
+	struct rte_eth_rxconf *rxq_conf;
+
+	nb_fc = cur_fwd_config.nb_fwd_lcores;
+	/* Check remaining cores - earlier cores were handled by the caller. */
+	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
+		sm_id = fwd_lcores[lc_id]->stream_idx;
+		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+		     sm_id++) {
+			fs = fwd_streams[sm_id];
+			port = &ports[fs->rx_port];
+			dev_info = &port->dev_info;
+			rxq_conf = &port->rxq[fs->rx_queue].conf;
+			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+			    == 0 || rxq_conf->share_group == 0)
+				/* Not shared rxq. */
+				continue;
+			/* Sharing only conflicts within one switch domain. */
+			if (domain_id != port->dev_info.switch_info.domain_id)
+				continue;
+			if (rxq_conf->share_group != share_group)
+				continue;
+			if (rxq_conf->share_qid != share_rxq)
+				continue;
+			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
+			       share_group, share_rxq);
+			printf("  lcore %hhu Port %hu queue %hu\n",
+			       src_lc, src_port, src_rxq);
+			printf("  lcore %hhu Port %hu queue %hu\n",
+			       lc_id, fs->rx_port, fs->rx_queue);
+			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
+			       nb_rxq);
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Check shared rxq configuration.
+ *
+ * A shared group must not be scheduled on different cores.  Returns true
+ * when the configuration is valid (or sharing is disabled).
+ */
+bool
+pkt_fwd_shared_rxq_check(void)
+{
+	streamid_t sm_id;
+	streamid_t nb_fs_per_lcore;
+	lcoreid_t  nb_fc;
+	lcoreid_t  lc_id;
+	struct fwd_stream *fs;
+	uint16_t domain_id;
+	struct rte_port *port;
+	struct rte_eth_dev_info *dev_info;
+	struct rte_eth_rxconf *rxq_conf;
+
+	/* Nothing to validate when Rx queue sharing is disabled. */
+	if (rxq_share == 0)
+		return true;
+	nb_fc = cur_fwd_config.nb_fwd_lcores;
+	/*
+	 * Check streams on each core, make sure the same switch domain +
+	 * group + queue doesn't get scheduled on other cores.
+	 */
+	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
+		sm_id = fwd_lcores[lc_id]->stream_idx;
+		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+		     sm_id++) {
+			fs = fwd_streams[sm_id];
+			/* Update lcore info stream being scheduled. */
+			fs->lcore = fwd_lcores[lc_id];
+			port = &ports[fs->rx_port];
+			dev_info = &port->dev_info;
+			rxq_conf = &port->rxq[fs->rx_queue].conf;
+			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+			    == 0 || rxq_conf->share_group == 0)
+				/* Not shared rxq. */
+				continue;
+			/* Check shared rxq not scheduled on remaining cores. */
+			domain_id = port->dev_info.switch_info.domain_id;
+			if (fwd_stream_on_other_lcores(domain_id, lc_id,
+						       fs->rx_port,
+						       fs->rx_queue,
+						       rxq_conf->share_group,
+						       rxq_conf->share_qid))
+				return false;
+		}
+	}
+	return true;
+}
+
/*
* Setup forwarding configuration for each logical core.
*/
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
fwd_lcores[lc_id]->stream_nb = 0;
fwd_lcores[lc_id]->stream_idx = sm_id;
- for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+ for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
/* if the nb_queue is zero, means this tc is
* not enabled on the POOL
*/
tx_pkt_times_intra = tx_times[1];
}
+#ifdef RTE_LIB_GRO
void
setup_gro(const char *onoff, portid_t port_id)
{
} else
printf("Port %u doesn't enable GRO.\n", port_id);
}
+#endif /* RTE_LIB_GRO */
+#ifdef RTE_LIB_GSO
void
setup_gso(const char *mode, portid_t port_id)
{
gso_ports[port_id].enable = 0;
}
}
+#endif /* RTE_LIB_GSO */
char*
list_pkt_forwarding_modes(void)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
} else {
- vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
- vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
- vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
} else {
- vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
return;
if (ports[port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_QINQ_INSERT) {
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
fprintf(stderr, "Error, as QinQ has been enabled.\n");
return;
}
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
fprintf(stderr,
"Error: vlan insert is not supported by port %d\n",
port_id);
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
}
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
fprintf(stderr,
"Error: qinq insert not supported by port %d\n",
port_id);
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
tx_vlan_reset(portid_t port_id)
{
ports[port_id].dev_conf.txmode.offloads &=
- ~(DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = 0;
ports[port_id].tx_vlan_id_outer = 0;
}
{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
+ {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
+ {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
{"port", RTE_ETH_FLOW_PORT},
{"vxlan", RTE_ETH_FLOW_VXLAN},
{"geneve", RTE_ETH_FLOW_GENEVE},
{"nvgre", RTE_ETH_FLOW_NVGRE},
{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
+ {"gtpu", RTE_ETH_FLOW_GTPU},
};
for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
ret = eth_link_get_nowait_print_err(port_id, &link);
if (ret < 0)
return 1;
- if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+ if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
rate > link.link_speed) {
fprintf(stderr,
"Invalid rate value:%u bigger than link speed: %u\n",
{
port->mc_addr_nb--;
if (addr_idx == port->mc_addr_nb) {
- /* No need to recompact the set of multicast addressses. */
+ /* No need to recompact the set of multicast addresses. */
if (port->mc_addr_nb == 0) {
/* free the pool of multicast addresses. */
free(port->mc_addr_pool);