#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
+#ifdef RTE_LIB_GRO
#include <rte_gro.h>
+#endif
#include <rte_hexdump.h>
#include "testpmd.h"
};
const struct rss_type_info rss_type_table[] = {
- { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
- ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
- ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
- ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+ { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+ RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+ RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
{ "none", 0 },
- { "eth", ETH_RSS_ETH },
- { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
- { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
- { "vlan", ETH_RSS_VLAN },
- { "s-vlan", ETH_RSS_S_VLAN },
- { "c-vlan", ETH_RSS_C_VLAN },
- { "ipv4", ETH_RSS_IPV4 },
- { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
- { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
- { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
- { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
- { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
- { "ipv6", ETH_RSS_IPV6 },
- { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
- { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
- { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
- { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
- { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
- { "l2-payload", ETH_RSS_L2_PAYLOAD },
- { "ipv6-ex", ETH_RSS_IPV6_EX },
- { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
- { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
- { "port", ETH_RSS_PORT },
- { "vxlan", ETH_RSS_VXLAN },
- { "geneve", ETH_RSS_GENEVE },
- { "nvgre", ETH_RSS_NVGRE },
- { "ip", ETH_RSS_IP },
- { "udp", ETH_RSS_UDP },
- { "tcp", ETH_RSS_TCP },
- { "sctp", ETH_RSS_SCTP },
- { "tunnel", ETH_RSS_TUNNEL },
+ { "eth", RTE_ETH_RSS_ETH },
+ { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+ { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+ { "vlan", RTE_ETH_RSS_VLAN },
+ { "s-vlan", RTE_ETH_RSS_S_VLAN },
+ { "c-vlan", RTE_ETH_RSS_C_VLAN },
+ { "ipv4", RTE_ETH_RSS_IPV4 },
+ { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+ { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+ { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+ { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+ { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+ { "ipv6", RTE_ETH_RSS_IPV6 },
+ { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+ { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+ { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+ { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+ { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+ { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+ { "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+ { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+ { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+ { "port", RTE_ETH_RSS_PORT },
+ { "vxlan", RTE_ETH_RSS_VXLAN },
+ { "geneve", RTE_ETH_RSS_GENEVE },
+ { "nvgre", RTE_ETH_RSS_NVGRE },
+ { "ip", RTE_ETH_RSS_IP },
+ { "udp", RTE_ETH_RSS_UDP },
+ { "tcp", RTE_ETH_RSS_TCP },
+ { "sctp", RTE_ETH_RSS_SCTP },
+ { "tunnel", RTE_ETH_RSS_TUNNEL },
{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
- { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
- { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
- { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
- { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
- { "esp", ETH_RSS_ESP },
- { "ah", ETH_RSS_AH },
- { "l2tpv3", ETH_RSS_L2TPV3 },
- { "pfcp", ETH_RSS_PFCP },
- { "pppoe", ETH_RSS_PPPOE },
- { "gtpu", ETH_RSS_GTPU },
- { "ecpri", ETH_RSS_ECPRI },
- { "mpls", ETH_RSS_MPLS },
- { "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
- { "l4-chksum", ETH_RSS_L4_CHKSUM },
+ { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+ { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+ { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+ { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+ { "esp", RTE_ETH_RSS_ESP },
+ { "ah", RTE_ETH_RSS_AH },
+ { "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+ { "pfcp", RTE_ETH_RSS_PFCP },
+ { "pppoe", RTE_ETH_RSS_PPPOE },
+ { "gtpu", RTE_ETH_RSS_GTPU },
+ { "ecpri", RTE_ETH_RSS_ECPRI },
+ { "mpls", RTE_ETH_RSS_MPLS },
+ { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+ { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
{ NULL, 0 },
};
device_infos_display_speeds(uint32_t speed_capa)
{
printf("\n\tDevice speed capability:");
- if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+ if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
printf(" Autonegotiate (all speeds)");
- if (speed_capa & ETH_LINK_SPEED_FIXED)
+ if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
printf(" Disable autonegotiate (fixed speed) ");
- if (speed_capa & ETH_LINK_SPEED_10M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
printf(" 10 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_10M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M)
printf(" 10 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
printf(" 100 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M)
printf(" 100 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_1G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_1G)
printf(" 1 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_2_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
printf(" 2.5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_5G)
printf(" 5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_10G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10G)
printf(" 10 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_20G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_20G)
printf(" 20 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_25G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_25G)
printf(" 25 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_40G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_40G)
printf(" 40 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_50G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_50G)
printf(" 50 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_56G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_56G)
printf(" 56 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_100G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100G)
printf(" 100 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_200G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_200G)
printf(" 200 Gbps ");
}
printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
- printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
- printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+ printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
("On") : ("Off"));
if (!rte_eth_dev_get_mtu(port_id, &mtu))
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (vlan_offload >= 0){
printf("VLAN offload: \n");
- if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
printf(" strip on, ");
else
printf(" strip off, ");
- if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
printf("filter on, ");
else
printf("filter off, ");
- if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
printf("extend on, ");
else
printf("extend off, ");
- if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
printf("qinq strip on\n");
else
printf("qinq strip off\n");
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- diag = rte_eth_dev_set_mtu(port_id, mtu);
- if (diag != 0) {
- fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
- return;
+ if (port->need_reconfig == 0) {
+ diag = rte_eth_dev_set_mtu(port_id, mtu);
+ if (diag != 0) {
+ fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
+ return;
+ }
}
port->dev_conf.rxmode.mtu = mtu;
error->cause), buf) : "",
error->message ? error->message : "(no stated reason)",
rte_strerror(err));
+
+ switch (error->type) {
+ case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
+ fprintf(stderr, "The status suggests the use of \"transfer\" "
+ "as the possible cause of the failure. Make "
+ "sure that the flow in question and its "
+ "indirect components (if any) are managed "
+ "via \"transfer\" proxy port. Use command "
+ "\"show port (port_id) flow transfer proxy\" "
+ "to figure out the proxy port ID\n");
+ break;
+ default:
+ break;
+ }
+
return -err;
}
struct port_indirect_action *pia;
int ret;
struct rte_flow_error error;
- struct rte_port *port;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
ret = action_alloc(port_id, id, &pia);
if (ret)
return ret;
-
- port = &ports[port_id];
-
- if (conf->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
struct rte_flow_action_age *age =
(struct rte_flow_action_age *)(uintptr_t)(action->conf);
return port_flow_complain(&error);
}
pia->type = action->type;
- pia->transfer = conf->transfer;
printf("Indirect action #%u created\n", pia->id);
return 0;
}
for (i = 0; i != n; ++i) {
struct rte_flow_error error;
struct port_indirect_action *pia = *tmp;
- portid_t port_id_eff = port_id;
if (actions[i] != pia->id)
continue;
-
- if (pia->transfer)
- port_id_eff = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
- port_id_eff == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
/*
* Poisoning to make sure PMDs update it in case
* of error.
memset(&error, 0x33, sizeof(error));
if (pia->handle && rte_flow_action_handle_destroy(
- port_id_eff, pia->handle, &error)) {
+ port_id, pia->handle, &error)) {
ret = port_flow_complain(&error);
continue;
}
struct rte_flow_error error;
struct rte_flow_action_handle *action_handle;
struct port_indirect_action *pia;
- struct rte_port *port;
const void *update;
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- port = &ports[port_id];
-
action_handle = port_action_handle_get_by_id(port_id, id);
if (!action_handle)
return -EINVAL;
update = action;
break;
}
-
- if (pia->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
if (rte_flow_action_handle_update(port_id, action_handle, update,
&error)) {
return port_flow_complain(&error);
struct rte_flow_query_age age;
struct rte_flow_action_conntrack ct;
} query;
- portid_t port_id_eff = port_id;
- struct rte_port *port;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- port = &ports[port_id];
pia = action_get_by_id(port_id, id);
if (!pia)
id, pia->type, port_id);
return -ENOTSUP;
}
-
- if (pia->transfer)
- port_id_eff = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
- port_id_eff == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x55, sizeof(error));
memset(&query, 0, sizeof(query));
- if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query,
- &error))
+ if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
return port_flow_complain(&error);
switch (pia->type) {
case RTE_FLOW_ACTION_TYPE_AGE:
{
struct rte_flow_error error;
struct port_flow_tunnel *pft = NULL;
- struct rte_port *port;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- port = &ports[port_id];
-
- if (attr->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
+ int ret;
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x11, sizeof(error));
if (pft->actions)
actions = pft->actions;
}
- if (rte_flow_validate(port_id, attr, pattern, actions, &error))
- return port_flow_complain(&error);
+ ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
if (tunnel_ops->enabled)
port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
+ if (ret)
+ return port_flow_complain(&error);
printf("Flow rule validated\n");
return 0;
}
struct port_flow_tunnel *pft = NULL;
struct rte_flow_action_age *age = age_action_get(actions);
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
port = &ports[port_id];
-
- if (attr->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
if (port->flow_list) {
if (port->flow_list->id == UINT32_MAX) {
fprintf(stderr,
uint32_t i;
for (i = 0; i != n; ++i) {
- portid_t port_id_eff = port_id;
struct rte_flow_error error;
struct port_flow *pf = *tmp;
* of error.
*/
memset(&error, 0x33, sizeof(error));
-
- if (pf->rule.attr->transfer)
- port_id_eff = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
- port_id_eff == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
- if (rte_flow_destroy(port_id_eff, pf->flow, &error)) {
+ if (rte_flow_destroy(port_id, pf->flow, &error)) {
ret = port_flow_complain(&error);
continue;
}
fprintf(stderr, "Flow rule #%u not found\n", rule);
return -ENOENT;
}
-
- if (pf->rule.attr->transfer)
- port_id = port->flow_transfer_proxy;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN) ||
- port_id == (portid_t)RTE_PORT_ALL)
- return -EINVAL;
-
ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
&name, sizeof(name),
(void *)(uintptr_t)action->type, &error);
printf(" RX threshold registers: pthresh=%d hthresh=%d "
" wthresh=%d\n",
pthresh_tmp, hthresh_tmp, wthresh_tmp);
- printf(" RX Offloads=0x%"PRIx64"\n", offloads_tmp);
+ printf(" RX Offloads=0x%"PRIx64, offloads_tmp);
+ if (rx_conf->share_group > 0)
+ printf(" share_group=%u share_qid=%u",
+ rx_conf->share_group,
+ rx_conf->share_qid);
+ printf("\n");
}
/* per tx queue config only for first queue to be less verbose */
}
for (i = 0; i < nb_entries; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
printf("RSS RETA configuration: hash index=%u, queue=%u\n",
}
/*
- * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionally, the RSS hash
* key of the port.
*/
void
}
printf("RSS functions:\n ");
for (i = 0; rss_type_table[i].str; i++) {
- if (rss_hf & rss_type_table[i].rss_type)
+ if (rss_type_table[i].rss_type == 0)
+ continue;
+ if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type)
printf("%s ", rss_type_table[i].str);
}
printf("\n");
unsigned int i;
rss_conf.rss_key = NULL;
- rss_conf.rss_key_len = hash_key_len;
+ rss_conf.rss_key_len = 0;
rss_conf.rss_hf = 0;
for (i = 0; rss_type_table[i].str; i++) {
if (!strcmp(rss_type_table[i].str, rss_type))
diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
if (diag == 0) {
rss_conf.rss_key = hash_key;
+ rss_conf.rss_key_len = hash_key_len;
diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
if (diag == 0)
}
}
+/*
+ * Check whether a shared rxq scheduled on other lcores.
+ */
+static bool
+fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
+ portid_t src_port, queueid_t src_rxq,
+ uint32_t share_group, queueid_t share_rxq)
+{
+ streamid_t sm_id;
+ streamid_t nb_fs_per_lcore;
+ lcoreid_t nb_fc;
+ lcoreid_t lc_id;
+ struct fwd_stream *fs;
+ struct rte_port *port;
+ struct rte_eth_dev_info *dev_info;
+ struct rte_eth_rxconf *rxq_conf;
+
+ nb_fc = cur_fwd_config.nb_fwd_lcores;
+ /* Check remaining cores. */
+ for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
+ sm_id = fwd_lcores[lc_id]->stream_idx;
+ nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+ for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+ sm_id++) {
+ fs = fwd_streams[sm_id];
+ port = &ports[fs->rx_port];
+ dev_info = &port->dev_info;
+ rxq_conf = &port->rx_conf[fs->rx_queue];
+ if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+ == 0 || rxq_conf->share_group == 0)
+ /* Not shared rxq. */
+ continue;
+ if (domain_id != port->dev_info.switch_info.domain_id)
+ continue;
+ if (rxq_conf->share_group != share_group)
+ continue;
+ if (rxq_conf->share_qid != share_rxq)
+ continue;
+ printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
+ share_group, share_rxq);
+ printf(" lcore %hhu Port %hu queue %hu\n",
+ src_lc, src_port, src_rxq);
+ printf(" lcore %hhu Port %hu queue %hu\n",
+ lc_id, fs->rx_port, fs->rx_queue);
+ printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
+ nb_rxq);
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Check shared rxq configuration.
+ *
+ * Shared group must not being scheduled on different core.
+ */
+bool
+pkt_fwd_shared_rxq_check(void)
+{
+ streamid_t sm_id;
+ streamid_t nb_fs_per_lcore;
+ lcoreid_t nb_fc;
+ lcoreid_t lc_id;
+ struct fwd_stream *fs;
+ uint16_t domain_id;
+ struct rte_port *port;
+ struct rte_eth_dev_info *dev_info;
+ struct rte_eth_rxconf *rxq_conf;
+
+ if (rxq_share == 0)
+ return true;
+ nb_fc = cur_fwd_config.nb_fwd_lcores;
+ /*
+ * Check streams on each core, make sure the same switch domain +
+ * group + queue doesn't get scheduled on other cores.
+ */
+ for (lc_id = 0; lc_id < nb_fc; lc_id++) {
+ sm_id = fwd_lcores[lc_id]->stream_idx;
+ nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+ for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+ sm_id++) {
+ fs = fwd_streams[sm_id];
+ /* Update lcore info stream being scheduled. */
+ fs->lcore = fwd_lcores[lc_id];
+ port = &ports[fs->rx_port];
+ dev_info = &port->dev_info;
+ rxq_conf = &port->rx_conf[fs->rx_queue];
+ if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+ == 0 || rxq_conf->share_group == 0)
+ /* Not shared rxq. */
+ continue;
+ /* Check shared rxq not scheduled on remaining cores. */
+ domain_id = port->dev_info.switch_info.domain_id;
+ if (fwd_stream_on_other_lcores(domain_id, lc_id,
+ fs->rx_port,
+ fs->rx_queue,
+ rxq_conf->share_group,
+ rxq_conf->share_qid))
+ return false;
+ }
+ }
+ return true;
+}
+
/*
* Setup forwarding configuration for each logical core.
*/
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
fwd_lcores[lc_id]->stream_nb = 0;
fwd_lcores[lc_id]->stream_idx = sm_id;
- for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+ for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
/* if the nb_queue is zero, means this tc is
* not enabled on the POOL
*/
tx_pkt_times_intra = tx_times[1];
}
+#ifdef RTE_LIB_GRO
void
setup_gro(const char *onoff, portid_t port_id)
{
} else
printf("Port %u doesn't enable GRO.\n", port_id);
}
+#endif /* RTE_LIB_GRO */
+#ifdef RTE_LIB_GSO
void
setup_gso(const char *mode, portid_t port_id)
{
gso_ports[port_id].enable = 0;
}
}
+#endif /* RTE_LIB_GSO */
char*
list_pkt_forwarding_modes(void)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
} else {
- vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
- vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
- vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
} else {
- vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
return;
if (ports[port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_QINQ_INSERT) {
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
fprintf(stderr, "Error, as QinQ has been enabled.\n");
return;
}
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
fprintf(stderr,
"Error: vlan insert is not supported by port %d\n",
port_id);
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
}
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
fprintf(stderr,
"Error: qinq insert not supported by port %d\n",
port_id);
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
tx_vlan_reset(portid_t port_id)
{
ports[port_id].dev_conf.txmode.offloads &=
- ~(DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = 0;
ports[port_id].tx_vlan_id_outer = 0;
}
{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ {"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
+ {"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
+ {"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
{"port", RTE_ETH_FLOW_PORT},
{"vxlan", RTE_ETH_FLOW_VXLAN},
{"geneve", RTE_ETH_FLOW_GENEVE},
{"nvgre", RTE_ETH_FLOW_NVGRE},
{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
+ {"gtpu", RTE_ETH_FLOW_GTPU},
};
for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
ret = eth_link_get_nowait_print_err(port_id, &link);
if (ret < 0)
return 1;
- if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+ if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
rate > link.link_speed) {
fprintf(stderr,
"Invalid rate value:%u bigger than link speed: %u\n",
{
port->mc_addr_nb--;
if (addr_idx == port->mc_addr_nb) {
- /* No need to recompact the set of multicast addressses. */
+ /* No need to recompact the set of multicast addresses. */
if (port->mc_addr_nb == 0) {
/* free the pool of multicast addresses. */
free(port->mc_addr_pool);