}
ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
- if (ret == 0 && fc_conf.mode != RTE_FC_NONE) {
+ if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE) {
printf("\t -- flow control mode %s%s high %u low %u pause %u%s%s\n",
- fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
- fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
- fc_conf.mode == RTE_FC_FULL ? "full" : "???",
+ fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
+ fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
+ fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
fc_conf.autoneg ? " auto" : "",
fc_conf.high_water,
fc_conf.low_water,
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
};
struct rte_eth_rxconf rx_conf;
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
};
if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
local_port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
ret = rte_eth_dev_info_get(i, &dev_info);
if (ret != 0) {
}
/* Enable mbuf fast free if PMD has the capability. */
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
#define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
#define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
#define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
-#define GET_RSS_HF() (ETH_RSS_IP)
+#define GET_RSS_HF() (RTE_ETH_RSS_IP)
/* Configuration */
#define RXQ_NUM 4
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
RTE_LOG(INFO, USER1, "Port %u %s\n",
port,
link_status_text);
- if (link.link_status == ETH_LINK_DOWN)
+ if (link.link_status == RTE_ETH_LINK_DOWN)
all_ports_up = 0;
}
int duplex;
if (!strcmp(duplexstr, "half")) {
- duplex = ETH_LINK_HALF_DUPLEX;
+ duplex = RTE_ETH_LINK_HALF_DUPLEX;
} else if (!strcmp(duplexstr, "full")) {
- duplex = ETH_LINK_FULL_DUPLEX;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else if (!strcmp(duplexstr, "auto")) {
- duplex = ETH_LINK_FULL_DUPLEX;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else {
fprintf(stderr, "Unknown duplex parameter\n");
return -1;
}
if (!strcmp(speedstr, "10")) {
- *speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
- ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+ *speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+ RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
} else if (!strcmp(speedstr, "100")) {
- *speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
- ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+ *speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
+ RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
} else {
- if (duplex != ETH_LINK_FULL_DUPLEX) {
+ if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
fprintf(stderr, "Invalid speed/duplex parameters\n");
return -1;
}
if (!strcmp(speedstr, "1000")) {
- *speed = ETH_LINK_SPEED_1G;
+ *speed = RTE_ETH_LINK_SPEED_1G;
} else if (!strcmp(speedstr, "10000")) {
- *speed = ETH_LINK_SPEED_10G;
+ *speed = RTE_ETH_LINK_SPEED_10G;
} else if (!strcmp(speedstr, "25000")) {
- *speed = ETH_LINK_SPEED_25G;
+ *speed = RTE_ETH_LINK_SPEED_25G;
} else if (!strcmp(speedstr, "40000")) {
- *speed = ETH_LINK_SPEED_40G;
+ *speed = RTE_ETH_LINK_SPEED_40G;
} else if (!strcmp(speedstr, "50000")) {
- *speed = ETH_LINK_SPEED_50G;
+ *speed = RTE_ETH_LINK_SPEED_50G;
} else if (!strcmp(speedstr, "100000")) {
- *speed = ETH_LINK_SPEED_100G;
+ *speed = RTE_ETH_LINK_SPEED_100G;
} else if (!strcmp(speedstr, "200000")) {
- *speed = ETH_LINK_SPEED_200G;
+ *speed = RTE_ETH_LINK_SPEED_200G;
} else if (!strcmp(speedstr, "auto")) {
- *speed = ETH_LINK_SPEED_AUTONEG;
+ *speed = RTE_ETH_LINK_SPEED_AUTONEG;
} else {
fprintf(stderr, "Unknown speed parameter\n");
return -1;
}
}
- if (*speed != ETH_LINK_SPEED_AUTONEG)
- *speed |= ETH_LINK_SPEED_FIXED;
+ if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
+ *speed |= RTE_ETH_LINK_SPEED_FIXED;
return 0;
}
int ret;
if (!strcmp(res->value, "all"))
- rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
- ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
- ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
- ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
- ETH_RSS_ECPRI;
+ rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
+ RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
+ RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
+ RTE_ETH_RSS_ECPRI;
else if (!strcmp(res->value, "eth"))
- rss_conf.rss_hf = ETH_RSS_ETH;
+ rss_conf.rss_hf = RTE_ETH_RSS_ETH;
else if (!strcmp(res->value, "vlan"))
- rss_conf.rss_hf = ETH_RSS_VLAN;
+ rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
else if (!strcmp(res->value, "ip"))
- rss_conf.rss_hf = ETH_RSS_IP;
+ rss_conf.rss_hf = RTE_ETH_RSS_IP;
else if (!strcmp(res->value, "udp"))
- rss_conf.rss_hf = ETH_RSS_UDP;
+ rss_conf.rss_hf = RTE_ETH_RSS_UDP;
else if (!strcmp(res->value, "tcp"))
- rss_conf.rss_hf = ETH_RSS_TCP;
+ rss_conf.rss_hf = RTE_ETH_RSS_TCP;
else if (!strcmp(res->value, "sctp"))
- rss_conf.rss_hf = ETH_RSS_SCTP;
+ rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
else if (!strcmp(res->value, "ether"))
- rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
else if (!strcmp(res->value, "port"))
- rss_conf.rss_hf = ETH_RSS_PORT;
+ rss_conf.rss_hf = RTE_ETH_RSS_PORT;
else if (!strcmp(res->value, "vxlan"))
- rss_conf.rss_hf = ETH_RSS_VXLAN;
+ rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
else if (!strcmp(res->value, "geneve"))
- rss_conf.rss_hf = ETH_RSS_GENEVE;
+ rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
else if (!strcmp(res->value, "nvgre"))
- rss_conf.rss_hf = ETH_RSS_NVGRE;
+ rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
else if (!strcmp(res->value, "l3-pre32"))
rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
	else if (!strcmp(res->value, "l3-pre40"))
		rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE40;
else if (!strcmp(res->value, "l3-pre96"))
rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
else if (!strcmp(res->value, "l3-src-only"))
- rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
else if (!strcmp(res->value, "l3-dst-only"))
- rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
else if (!strcmp(res->value, "l4-src-only"))
- rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
else if (!strcmp(res->value, "l4-dst-only"))
- rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
else if (!strcmp(res->value, "l2-src-only"))
- rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
else if (!strcmp(res->value, "l2-dst-only"))
- rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
else if (!strcmp(res->value, "l2tpv3"))
- rss_conf.rss_hf = ETH_RSS_L2TPV3;
+ rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
else if (!strcmp(res->value, "esp"))
- rss_conf.rss_hf = ETH_RSS_ESP;
+ rss_conf.rss_hf = RTE_ETH_RSS_ESP;
else if (!strcmp(res->value, "ah"))
- rss_conf.rss_hf = ETH_RSS_AH;
+ rss_conf.rss_hf = RTE_ETH_RSS_AH;
else if (!strcmp(res->value, "pfcp"))
- rss_conf.rss_hf = ETH_RSS_PFCP;
+ rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
else if (!strcmp(res->value, "pppoe"))
- rss_conf.rss_hf = ETH_RSS_PPPOE;
+ rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
else if (!strcmp(res->value, "gtpu"))
- rss_conf.rss_hf = ETH_RSS_GTPU;
+ rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
else if (!strcmp(res->value, "ecpri"))
- rss_conf.rss_hf = ETH_RSS_ECPRI;
+ rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
else if (!strcmp(res->value, "mpls"))
- rss_conf.rss_hf = ETH_RSS_MPLS;
+ rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
else if (!strcmp(res->value, "ipv4-chksum"))
- rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
+ rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
else if (!strcmp(res->value, "level-default")) {
- rss_hf &= (~ETH_RSS_LEVEL_MASK);
- rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
+ rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+ rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
} else if (!strcmp(res->value, "level-outer")) {
- rss_hf &= (~ETH_RSS_LEVEL_MASK);
- rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
+ rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+ rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
} else if (!strcmp(res->value, "level-inner")) {
- rss_hf &= (~ETH_RSS_LEVEL_MASK);
- rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
+ rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
+ rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
} else if (!strcmp(res->value, "default"))
use_default = 1;
else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
return -1;
}
- idx = hash_index / RTE_RETA_GROUP_SIZE;
- shift = hash_index % RTE_RETA_GROUP_SIZE;
+ idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
+ shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
reta_conf[idx].mask |= (1ULL << shift);
reta_conf[idx].reta[shift] = nb_queue;
}
} else
printf("The reta size of port %d is %u\n",
res->port_id, dev_info.reta_size);
- if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+ if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
fprintf(stderr,
"Currently do not support more than %u entries of redirection table\n",
- ETH_RSS_RETA_SIZE_512);
+ RTE_ETH_RSS_RETA_SIZE_512);
return;
}
char *end;
char *str_fld[8];
uint16_t i;
- uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
- RTE_RETA_GROUP_SIZE;
+ uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
+ RTE_ETH_RETA_GROUP_SIZE;
int ret;
p = strchr(p0, '(');
if (ret != 0)
return;
- max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+ max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
if (res->size == 0 || res->size > max_reta_size) {
fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
res->size, max_reta_size);
return;
}
- if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+ if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
fprintf(stderr,
"The invalid number of traffic class, only 4 or 8 allowed.\n");
return;
enum rte_vlan_type vlan_type;
if (!strcmp(res->vlan_type, "inner"))
- vlan_type = ETH_VLAN_TYPE_INNER;
+ vlan_type = RTE_ETH_VLAN_TYPE_INNER;
else if (!strcmp(res->vlan_type, "outer"))
- vlan_type = ETH_VLAN_TYPE_OUTER;
+ vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
else {
fprintf(stderr, "Unknown vlan type\n");
return;
printf("Parse tunnel is %s\n",
(ports[port_id].parse_tunnel) ? "on" : "off");
printf("IP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
printf("UDP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
printf("TCP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
printf("SCTP checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
printf("Outer-Ip checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
printf("Outer-Udp checksum offload is %s\n",
- (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
ret = eth_dev_info_get_print_err(port_id, &dev_info);
if (ret != 0)
return;
- if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware IP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware UDP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware TCP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
port_id);
}
- if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
== 0) {
fprintf(stderr,
"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
if (!strcmp(res->proto, "ip")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_IPV4_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
} else {
fprintf(stderr,
"IP checksum offload is not supported by port %u\n",
}
} else if (!strcmp(res->proto, "udp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_UDP_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
} else {
fprintf(stderr,
"UDP checksum offload is not supported by port %u\n",
}
} else if (!strcmp(res->proto, "tcp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_TCP_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
} else {
fprintf(stderr,
"TCP checksum offload is not supported by port %u\n",
}
} else if (!strcmp(res->proto, "sctp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_SCTP_CKSUM)) {
- csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
+ csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
} else {
fprintf(stderr,
"SCTP checksum offload is not supported by port %u\n",
}
} else if (!strcmp(res->proto, "outer-ip")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
csum_offloads |=
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
} else {
fprintf(stderr,
"Outer IP checksum offload is not supported by port %u\n",
}
} else if (!strcmp(res->proto, "outer-udp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
csum_offloads |=
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
} else {
fprintf(stderr,
"Outer UDP checksum offload is not supported by port %u\n",
return;
if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
fprintf(stderr, "Error: TSO is not supported by port %d\n",
res->port_id);
return;
if (ports[res->port_id].tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_TCP_TSO;
+ ~RTE_ETH_TX_OFFLOAD_TCP_TSO;
printf("TSO for non-tunneled packets is disabled\n");
} else {
ports[res->port_id].dev_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_TCP_TSO;
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
printf("TSO segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
return;
if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+ (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
fprintf(stderr,
"Warning: TSO enabled but not supported by port %d\n",
res->port_id);
if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
return dev_info;
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
fprintf(stderr,
"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
fprintf(stderr,
"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
fprintf(stderr,
"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
fprintf(stderr,
"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
fprintf(stderr,
"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
- if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+ if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
fprintf(stderr,
"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
dev_info = check_tunnel_tso_nic_support(res->port_id);
if (ports[res->port_id].tunnel_tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
- ~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ ~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
printf("TSO for tunneled packets is disabled\n");
} else {
- uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
ports[res->port_id].dev_conf.txmode.offloads |=
(tso_offloads & dev_info.tx_offload_capa);
fprintf(stderr,
"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
if (!(ports[res->port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
fprintf(stderr,
"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
}
return;
}
- if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+ if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
rx_fc_en = true;
- if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
+ if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
tx_fc_en = true;
printf("\n%s Flow control infos for port %-2d %s\n",
/*
* Rx on/off, flow control is enabled/disabled on RX side. This can indicate
- * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+ * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
* Tx on/off, flow control is enabled/disabled on TX side. This can indicate
- * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+ * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
*/
static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
- {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+ {RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
};
/* Partial command line, retrieve current configuration */
return;
}
- if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
- (fc_conf.mode == RTE_FC_FULL))
+ if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
+ (fc_conf.mode == RTE_ETH_FC_FULL))
rx_fc_en = 1;
- if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
- (fc_conf.mode == RTE_FC_FULL))
+ if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
+ (fc_conf.mode == RTE_ETH_FC_FULL))
tx_fc_en = 1;
}
/*
* Rx on/off, flow control is enabled/disabled on RX side. This can indicate
- * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+ * the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
* Tx on/off, flow control is enabled/disabled on TX side. This can indicate
- * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+ * the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
*/
static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
- {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+ {RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
};
memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
if (!strcmp(res->what,"rxmode")) {
if (!strcmp(res->mode, "AUPE"))
- vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
else if (!strcmp(res->mode, "ROPE"))
- vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
else if (!strcmp(res->mode, "BAM"))
- vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
else if (!strncmp(res->mode, "MPE",3))
- vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
+ vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
}
RTE_SET_USED(is_on);
int ret;
tunnel_udp.udp_port = res->udp_port;
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
if (!strcmp(res->what, "add"))
ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
tunnel_udp.udp_port = res->udp_port;
if (!strcmp(res->tunnel_type, "vxlan")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
} else if (!strcmp(res->tunnel_type, "geneve")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
} else if (!strcmp(res->tunnel_type, "ecpri")) {
- tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
+ tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
} else {
fprintf(stderr, "Invalid tunnel type\n");
return;
if (ret != 0)
return;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_NET_IXGBE
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
#endif
switch (ret) {
case 0:
ports[port_id].dev_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MACSEC_INSERT;
+ RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
cmd_reconfig_device_queue(port_id, 1, 1);
break;
case -ENODEV:
if (ret != 0)
return;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_NET_IXGBE
ret = rte_pmd_ixgbe_macsec_disable(port_id);
#endif
switch (ret) {
case 0:
ports[port_id].dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MACSEC_INSERT;
+ ~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
cmd_reconfig_device_queue(port_id, 1, 1);
break;
case -ENODEV:
};
const struct rss_type_info rss_type_table[] = {
- { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
- ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
- ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
- ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
+ { "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
+ RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
+ RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
{ "none", 0 },
- { "eth", ETH_RSS_ETH },
- { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
- { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
- { "vlan", ETH_RSS_VLAN },
- { "s-vlan", ETH_RSS_S_VLAN },
- { "c-vlan", ETH_RSS_C_VLAN },
- { "ipv4", ETH_RSS_IPV4 },
- { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
- { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
- { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
- { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
- { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
- { "ipv6", ETH_RSS_IPV6 },
- { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
- { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
- { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
- { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
- { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
- { "l2-payload", ETH_RSS_L2_PAYLOAD },
- { "ipv6-ex", ETH_RSS_IPV6_EX },
- { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
- { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
- { "port", ETH_RSS_PORT },
- { "vxlan", ETH_RSS_VXLAN },
- { "geneve", ETH_RSS_GENEVE },
- { "nvgre", ETH_RSS_NVGRE },
- { "ip", ETH_RSS_IP },
- { "udp", ETH_RSS_UDP },
- { "tcp", ETH_RSS_TCP },
- { "sctp", ETH_RSS_SCTP },
- { "tunnel", ETH_RSS_TUNNEL },
+ { "eth", RTE_ETH_RSS_ETH },
+ { "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
+ { "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
+ { "vlan", RTE_ETH_RSS_VLAN },
+ { "s-vlan", RTE_ETH_RSS_S_VLAN },
+ { "c-vlan", RTE_ETH_RSS_C_VLAN },
+ { "ipv4", RTE_ETH_RSS_IPV4 },
+ { "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
+ { "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
+ { "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
+ { "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
+ { "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
+ { "ipv6", RTE_ETH_RSS_IPV6 },
+ { "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
+ { "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
+ { "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
+ { "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
+ { "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
+ { "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
+ { "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
+ { "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
+ { "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
+ { "port", RTE_ETH_RSS_PORT },
+ { "vxlan", RTE_ETH_RSS_VXLAN },
+ { "geneve", RTE_ETH_RSS_GENEVE },
+ { "nvgre", RTE_ETH_RSS_NVGRE },
+ { "ip", RTE_ETH_RSS_IP },
+ { "udp", RTE_ETH_RSS_UDP },
+ { "tcp", RTE_ETH_RSS_TCP },
+ { "sctp", RTE_ETH_RSS_SCTP },
+ { "tunnel", RTE_ETH_RSS_TUNNEL },
{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
- { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
- { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
- { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
- { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
- { "esp", ETH_RSS_ESP },
- { "ah", ETH_RSS_AH },
- { "l2tpv3", ETH_RSS_L2TPV3 },
- { "pfcp", ETH_RSS_PFCP },
- { "pppoe", ETH_RSS_PPPOE },
- { "gtpu", ETH_RSS_GTPU },
- { "ecpri", ETH_RSS_ECPRI },
- { "mpls", ETH_RSS_MPLS },
- { "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
- { "l4-chksum", ETH_RSS_L4_CHKSUM },
+ { "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
+ { "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
+ { "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
+ { "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
+ { "esp", RTE_ETH_RSS_ESP },
+ { "ah", RTE_ETH_RSS_AH },
+ { "l2tpv3", RTE_ETH_RSS_L2TPV3 },
+ { "pfcp", RTE_ETH_RSS_PFCP },
+ { "pppoe", RTE_ETH_RSS_PPPOE },
+ { "gtpu", RTE_ETH_RSS_GTPU },
+ { "ecpri", RTE_ETH_RSS_ECPRI },
+ { "mpls", RTE_ETH_RSS_MPLS },
+ { "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
+ { "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
{ NULL, 0 },
};
device_infos_display_speeds(uint32_t speed_capa)
{
printf("\n\tDevice speed capability:");
- if (speed_capa == ETH_LINK_SPEED_AUTONEG)
+ if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
printf(" Autonegotiate (all speeds)");
- if (speed_capa & ETH_LINK_SPEED_FIXED)
+ if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
printf(" Disable autonegotiate (fixed speed) ");
- if (speed_capa & ETH_LINK_SPEED_10M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
printf(" 10 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_10M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10M)
printf(" 10 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M_HD)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
printf(" 100 Mbps half-duplex ");
- if (speed_capa & ETH_LINK_SPEED_100M)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100M)
printf(" 100 Mbps full-duplex ");
- if (speed_capa & ETH_LINK_SPEED_1G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_1G)
printf(" 1 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_2_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
printf(" 2.5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_5G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_5G)
printf(" 5 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_10G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_10G)
printf(" 10 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_20G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_20G)
printf(" 20 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_25G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_25G)
printf(" 25 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_40G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_40G)
printf(" 40 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_50G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_50G)
printf(" 50 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_56G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_56G)
printf(" 56 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_100G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_100G)
printf(" 100 Gbps ");
- if (speed_capa & ETH_LINK_SPEED_200G)
+ if (speed_capa & RTE_ETH_LINK_SPEED_200G)
printf(" 200 Gbps ");
}
printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
- printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
- printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
+ printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
("On") : ("Off"));
if (!rte_eth_dev_get_mtu(port_id, &mtu))
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (vlan_offload >= 0){
printf("VLAN offload: \n");
- if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
printf(" strip on, ");
else
printf(" strip off, ");
- if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
printf("filter on, ");
else
printf("filter off, ");
- if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
+ if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
printf("extend on, ");
else
printf("extend off, ");
- if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
+ if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
printf("qinq strip on\n");
else
printf("qinq strip off\n");
}
for (i = 0; i < nb_entries; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
printf("RSS RETA configuration: hash index=%u, queue=%u\n",
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
fwd_lcores[lc_id]->stream_nb = 0;
fwd_lcores[lc_id]->stream_idx = sm_id;
- for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
+ for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
/* if the nb_queue is zero, means this tc is
* not enabled on the POOL
*/
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
} else {
- vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+ vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
- vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
- vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
- vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
} else {
- vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
- port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
+ port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
return;
if (ports[port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_QINQ_INSERT) {
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
fprintf(stderr, "Error, as QinQ has been enabled.\n");
return;
}
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
fprintf(stderr,
"Error: vlan insert is not supported by port %d\n",
port_id);
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
}
if (ret != 0)
return;
- if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
fprintf(stderr,
"Error: qinq insert not supported by port %d\n",
port_id);
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
tx_vlan_reset(portid_t port_id)
{
ports[port_id].dev_conf.txmode.offloads &=
- ~(DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT);
+ ~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = 0;
ports[port_id].tx_vlan_id_outer = 0;
}
ret = eth_link_get_nowait_print_err(port_id, &link);
if (ret < 0)
return 1;
- if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
+ if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
rate > link.link_speed) {
fprintf(stderr,
"Invalid rate value:%u bigger than link speed: %u\n",
if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
- if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
ipv4_hdr->hdr_checksum = 0;
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
- if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= PKT_TX_UDP_CKSUM;
} else {
udp_hdr->dgram_cksum = 0;
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
- else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
} else {
tcp_hdr->cksum = 0;
((char *)l3_hdr + info->l3_len);
/* sctp payload must be a multiple of 4 to be
* offloaded */
- if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
((ipv4_hdr->total_length & 0x3) == 0)) {
ol_flags |= PKT_TX_SCTP_CKSUM;
} else {
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_OUTER_IPV4;
- if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
ol_flags |= PKT_TX_TCP_SEG;
/* Skip SW outer UDP checksum generation if HW supports it */
- if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
udp_hdr->dgram_cksum
= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
(tx_offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
m->l2_len = info.l2_len;
rte_be_to_cpu_16(info.outer_ethertype),
info.outer_l3_len);
/* dump tx packet info */
- if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
+ if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
info.tso_segsz != 0)
printf("tx: m->l2_len=%d m->l3_len=%d "
"m->l4_len=%d\n",
m->l2_len, m->l3_len, m->l4_len);
if (info.is_tunnel == 1) {
if ((tx_offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",
vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags |= PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
{
uint64_t ol_flags = 0;
- ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+ ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
PKT_TX_VLAN : 0;
- ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+ ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
PKT_TX_QINQ : 0;
- ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+ ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
PKT_TX_MACSEC : 0;
return ol_flags;
static int
parse_link_speed(int n)
{
- uint32_t speed = ETH_LINK_SPEED_FIXED;
+ uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
switch (n) {
case 1000:
- speed |= ETH_LINK_SPEED_1G;
+ speed |= RTE_ETH_LINK_SPEED_1G;
break;
case 10000:
- speed |= ETH_LINK_SPEED_10G;
+ speed |= RTE_ETH_LINK_SPEED_10G;
break;
case 25000:
- speed |= ETH_LINK_SPEED_25G;
+ speed |= RTE_ETH_LINK_SPEED_25G;
break;
case 40000:
- speed |= ETH_LINK_SPEED_40G;
+ speed |= RTE_ETH_LINK_SPEED_40G;
break;
case 50000:
- speed |= ETH_LINK_SPEED_50G;
+ speed |= RTE_ETH_LINK_SPEED_50G;
break;
case 100000:
- speed |= ETH_LINK_SPEED_100G;
+ speed |= RTE_ETH_LINK_SPEED_100G;
break;
case 200000:
- speed |= ETH_LINK_SPEED_200G;
+ speed |= RTE_ETH_LINK_SPEED_200G;
break;
case 100:
case 10:
if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
if (!strcmp(optarg, "64K"))
fdir_conf.pballoc =
- RTE_FDIR_PBALLOC_64K;
+ RTE_ETH_FDIR_PBALLOC_64K;
else if (!strcmp(optarg, "128K"))
fdir_conf.pballoc =
- RTE_FDIR_PBALLOC_128K;
+ RTE_ETH_FDIR_PBALLOC_128K;
else if (!strcmp(optarg, "256K"))
fdir_conf.pballoc =
- RTE_FDIR_PBALLOC_256K;
+ RTE_ETH_FDIR_PBALLOC_256K;
else
rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
" must be: 64K or 128K or 256K\n",
}
#endif
if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
- rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
- rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
- rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
- rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
if (!strcmp(lgopts[opt_idx].name,
"enable-rx-timestamp"))
- rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-filter"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-strip"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-extend"))
- rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-qinq-strip"))
- rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
rx_drop_en = 1;
if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
set_pkt_forwarding_mode(optarg);
if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
- rss_hf = ETH_RSS_IP;
+ rss_hf = RTE_ETH_RSS_IP;
if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
- rss_hf = ETH_RSS_UDP;
+ rss_hf = RTE_ETH_RSS_UDP;
if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
- rss_hf |= ETH_RSS_LEVEL_INNERMOST;
+ rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
- rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
+ rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
if (!strcmp(lgopts[opt_idx].name, "rxq")) {
n = atoi(optarg);
if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
char *end = NULL;
n = strtoul(optarg, &end, 16);
- if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
+ if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
else
rte_exit(EXIT_FAILURE,
"rx-mq-mode must be >= 0 and <= %d\n",
- ETH_MQ_RX_VMDQ_DCB_RSS);
+ RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
}
if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
record_core_cycles = 1;
/*
* Receive Side Scaling (RSS) configuration.
*/
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
/*
* Port topology configuration
struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
- .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+ .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
- .pballoc = RTE_FDIR_PBALLOC_64K,
+ .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
.vlan_tci_mask = 0xFFEF,
/*
* hexadecimal bitmask of RX mq mode can be enabled.
*/
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
/*
* Used to set forced link speed
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
- if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Apply Rx offloads configuration */
for (i = 0; i < port->dev_info.max_rx_queues; i++)
init_port_config();
- gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+ gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
port->dev_conf.rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_RSS);
+ (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
} else {
- port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+ port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
port->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_RSS_HASH;
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
for (i = 0;
i < port->dev_info.nb_rx_queues;
i++)
port->rx_conf[i].offloads &=
- ~DEV_RX_OFFLOAD_RSS_HASH;
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
}
}
vmdq_rx_conf->enable_default_pool = 0;
vmdq_rx_conf->default_pool = 0;
vmdq_rx_conf->nb_queue_pools =
- (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_tx_conf->nb_queue_pools =
- (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
vmdq_rx_conf->pool_map[i].pools =
1 << (i % vmdq_rx_conf->nb_queue_pools);
}
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
}
/* set DCB mode of RX and TX of multiple queues */
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
- eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+ eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
} else {
struct rte_eth_dcb_rx_conf *rx_conf =
		&eth_conf->rx_adv_conf.dcb_rx_conf;
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+ (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
eth_conf->rx_adv_conf.rss_conf = rss_conf;
- eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+ eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
}
if (pfc_en)
eth_conf->dcb_capability_en =
- ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+ RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
else
- eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+ eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
return 0;
}
retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
/* re-configure the device . */
retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
rxtx_port_config(pid);
/* VLAN filter */
- rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rx_vft_set(pid, vlan_tags[i], 1);
extern uint8_t bitrate_enabled;
#endif
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
extern uint32_t max_rx_pkt_len;
tx_offloads = txp->dev_conf.txmode.offloads;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
/*
{
int ret = 0;
struct rte_eth_link link_status = {
- .link_speed = ETH_SPEED_NUM_2_5G,
- .link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_AUTONEG,
- .link_duplex = ETH_LINK_FULL_DUPLEX
+ .link_speed = RTE_ETH_SPEED_NUM_2_5G,
+ .link_status = RTE_ETH_LINK_UP,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
text, strlen(text), "Invalid default link status string");
- link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
- link_status.link_autoneg = ETH_LINK_FIXED;
- link_status.link_speed = ETH_SPEED_NUM_10M,
+ link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link_status.link_autoneg = RTE_ETH_LINK_FIXED;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #2: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
text, strlen(text), "Invalid default link status "
"string with HDX");
- link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #3: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
text, strlen(text), "Invalid default link status "
"string with HDX");
- link_status.link_speed = ETH_SPEED_NUM_NONE;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #3: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
"string with HDX");
/* test max str len */
- link_status.link_speed = ETH_SPEED_NUM_200G;
- link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
- link_status.link_autoneg = ETH_LINK_AUTONEG;
+ link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
+ link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #4:len = %d, %s\n", ret, text);
RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
{
int ret = 0;
struct rte_eth_link link_status = {
- .link_speed = ETH_SPEED_NUM_2_5G,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
- .link_duplex = ETH_LINK_FULL_DUPLEX
+ .link_speed = RTE_ETH_SPEED_NUM_2_5G,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
int ret = 0;
struct rte_eth_link link_status = {
.link_speed = 55555,
- .link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_AUTONEG,
- .link_duplex = ETH_LINK_FULL_DUPLEX
+ .link_status = RTE_ETH_LINK_UP,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
const char *value;
uint32_t link_speed;
} speed_str_map[] = {
- { "None", ETH_SPEED_NUM_NONE },
- { "10 Mbps", ETH_SPEED_NUM_10M },
- { "100 Mbps", ETH_SPEED_NUM_100M },
- { "1 Gbps", ETH_SPEED_NUM_1G },
- { "2.5 Gbps", ETH_SPEED_NUM_2_5G },
- { "5 Gbps", ETH_SPEED_NUM_5G },
- { "10 Gbps", ETH_SPEED_NUM_10G },
- { "20 Gbps", ETH_SPEED_NUM_20G },
- { "25 Gbps", ETH_SPEED_NUM_25G },
- { "40 Gbps", ETH_SPEED_NUM_40G },
- { "50 Gbps", ETH_SPEED_NUM_50G },
- { "56 Gbps", ETH_SPEED_NUM_56G },
- { "100 Gbps", ETH_SPEED_NUM_100G },
- { "200 Gbps", ETH_SPEED_NUM_200G },
- { "Unknown", ETH_SPEED_NUM_UNKNOWN },
+ { "None", RTE_ETH_SPEED_NUM_NONE },
+ { "10 Mbps", RTE_ETH_SPEED_NUM_10M },
+ { "100 Mbps", RTE_ETH_SPEED_NUM_100M },
+ { "1 Gbps", RTE_ETH_SPEED_NUM_1G },
+ { "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
+ { "5 Gbps", RTE_ETH_SPEED_NUM_5G },
+ { "10 Gbps", RTE_ETH_SPEED_NUM_10G },
+ { "20 Gbps", RTE_ETH_SPEED_NUM_20G },
+ { "25 Gbps", RTE_ETH_SPEED_NUM_25G },
+ { "40 Gbps", RTE_ETH_SPEED_NUM_40G },
+ { "50 Gbps", RTE_ETH_SPEED_NUM_50G },
+ { "56 Gbps", RTE_ETH_SPEED_NUM_56G },
+ { "100 Gbps", RTE_ETH_SPEED_NUM_100G },
+ { "200 Gbps", RTE_ETH_SPEED_NUM_200G },
+ { "Unknown", RTE_ETH_SPEED_NUM_UNKNOWN },
{ "Invalid", 50505 }
};
{
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
},
.intr_conf = {
.rxq = 1,
{
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
},
};
static const struct rte_eth_conf port_conf = {
.txmode = {
- .mq_mode = ETH_DCB_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
struct rte_eth_rss_conf rss_conf;
uint8_t rss_key[40];
- struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
uint8_t is_slave;
struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
struct link_bonding_rssconf_unittest_params {
uint8_t bond_port_id;
struct rte_eth_dev_info bond_dev_info;
- struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
struct slave_conf slave_ports[SLAVE_COUNT];
struct rte_mempool *mbuf_pool;
*/
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
static struct rte_eth_conf rss_pmd_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IPV6,
+ .rss_hf = RTE_ETH_RSS_IPV6,
},
},
.lpbk_mode = 0,
static int
reta_set(uint16_t port_id, uint8_t value, int reta_size)
{
- struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
int i, j;
- for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
+ for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
/* select all fields to set */
reta_conf[i].mask = ~0LL;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
reta_conf[i].reta[j] = value;
}
for (i = 0; i < test_params.bond_dev_info.reta_size;
i++) {
- int index = i / RTE_RETA_GROUP_SIZE;
- int shift = i % RTE_RETA_GROUP_SIZE;
+ int index = i / RTE_ETH_RETA_GROUP_SIZE;
+ int shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (port->reta_conf[index].reta[shift] !=
test_params.bond_reta_conf[index].reta[shift])
bond_reta_fetch(void) {
unsigned j;
- for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
+ for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
j++)
test_params.bond_reta_conf[j].mask = ~0LL;
slave_reta_fetch(struct slave_conf *port) {
unsigned j;
- for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
port->reta_conf[j].mask = ~0LL;
TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 1, /* enable loopback */
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
/* bulk alloc rx, full-featured tx */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "hybrid")) {
/* bulk alloc rx, vector tx
*/
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "full")) {
/* full feature rx,tx pair */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
return 0;
}
void *pkt = NULL;
struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
int wait_to_complete __rte_unused)
{
if (!bonded_eth_dev->data->dev_started)
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL)
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue configuration.
- HW managed event vectorization on CN10K for packets enqueued from ethdev to
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
-- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue config.
------------------------
The PMD will re-insert the VLAN tag transparently to the packet if the kernel
-strips it, as long as the ``DEV_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
+strips it, as long as the ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
application.
 * TX: only the following reduced set of transmit offloads is supported in
vector mode::
-   DEV_TX_OFFLOAD_MBUF_FAST_FREE
+   RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 * RX: only the following reduced set of receive offloads is supported in
vector mode (note that jumbo MTU is allowed only when the MTU setting
- does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
-
-   DEV_RX_OFFLOAD_VLAN_STRIP
-   DEV_RX_OFFLOAD_KEEP_CRC
-   DEV_RX_OFFLOAD_IPV4_CKSUM
-   DEV_RX_OFFLOAD_UDP_CKSUM
-   DEV_RX_OFFLOAD_TCP_CKSUM
-   DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-   DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-   DEV_RX_OFFLOAD_RSS_HASH
-   DEV_RX_OFFLOAD_VLAN_FILTER
+ does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
+
+   RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+   RTE_ETH_RX_OFFLOAD_KEEP_CRC
+   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+   RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+   RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+   RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+   RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+   RTE_ETH_RX_OFFLOAD_RSS_HASH
+   RTE_ETH_RX_OFFLOAD_VLAN_FILTER
The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
vector processing is made at run-time when the port is started; if no transmit
.. code-block:: console
vlan_offload = rte_eth_dev_get_vlan_offload(port);
- vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+ vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
rte_eth_dev_set_vlan_offload(port, vlan_offload);
Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
Supports getting the speed capabilities that the current device is capable of.
-* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
* **[related] API**: ``rte_eth_dev_info_get()``.
Lock-free Tx queue
------------------
-If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+If a PMD advertises the RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capability, multiple threads can
invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
* **[related] API**: ``rte_eth_tx_burst()``.
Supports optimization for fast release of mbufs following successful Tx.
Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
.. _nic_features_free_tx_mbuf_on_demand:
Supports receiving segmented mbufs.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
* **[implements] datapath**: ``Scattered Rx function``.
* **[implements] rte_eth_dev_data**: ``scattered_rx``.
* **[provides] eth_dev_ops**: ``rxq_info_get:scattered_rx``.
Supports Large Receive Offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
``dev_conf.rxmode.max_lro_pkt_size``.
* **[implements] datapath**: ``LRO functionality``.
* **[implements] rte_eth_dev_data**: ``lro``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
* **[provides] rte_eth_dev_info**: ``max_lro_pkt_size``.
Supports TCP Segmentation Offloading.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
* **[uses] mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
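As a hedged illustration of the per-mbuf fields listed above (the MSS value
and header lengths are illustrative assumptions, not values mandated by the API)::

    /* Request TSO for a TCP/IPv4 mbuf; lengths and segment size are examples. */
    m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->l4_len = sizeof(struct rte_tcp_hdr);
    m->tso_segsz = 1448; /* TCP payload bytes per output segment */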
.. _nic_features_promiscuous_mode:
Supports RSS hashing on RX.
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
Supports RX RSS hashing on Inner headers.
* **[uses] rte_flow_action_rss**: ``level``.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
Supports Virtual Machine Device Queues (VMDq).
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
Supports Data Center Bridging (DCB).
-* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
Supports filtering of a VLAN Tag identifier.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
* **[implements] eth_dev_ops**: ``vlan_filter_set``.
* **[related] API**: ``rte_eth_dev_vlan_filter()``.
operations of the security protocol while the packet is received in the NIC. The NIC is not aware
of the protocol operations. See the Security library and PMD documentation for more details.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[uses] mbuf**: ``mbuf.l2_len``.
* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
packet is received at NIC. The NIC is capable of understanding the security
protocol operations. See security library and PMD documentation for more details.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[uses] mbuf**: ``mbuf.l2_len``.
* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
``capabilities_get``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
Supports CRC stripping by hardware.
A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
.. _nic_features_vlan_offload:
Supports VLAN offload to hardware.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
* **[implements] eth_dev_ops**: ``vlan_offload_set``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
``rte_eth_dev_get_vlan_offload()``.
Supports QinQ (queue in queue) offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
.. _nic_features_fec:
improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
* **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
-* **[provides] rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
+* **[provides] rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
* **[related] API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
Supports L3 checksum offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
.. _nic_features_l4_checksum_offload:
Supports L4 checksum offload.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
.. _nic_features_hw_timestamp:
Supports Timestamp.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.timestamp``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[related] eth_dev_ops**: ``read_clock``.
.. _nic_features_macsec_offload:
Supports MACsec.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
.. _nic_features_inner_l3_checksum:
Supports inner packet L3 checksum.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
.. _nic_features_inner_l4_checksum:
Supports inner packet L4 checksum.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
- ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
.. _nic_features_shared_rx_queue:
To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
will be checked:
-* ``DEV_RX_OFFLOAD_VLAN_EXTEND``
+* ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
-* ``DEV_RX_OFFLOAD_CHECKSUM``
+* ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
-* ``DEV_RX_OFFLOAD_HEADER_SPLIT``
+* ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
* ``fdir_conf->mode``
* If the max number of VFs (max_vfs) is set in the range of 1 to 32:
If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are a total of 32
- pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+ pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are a total of 32
- pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+ pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
* If the max number of VFs (max_vfs) is in the range of 33 to 64:
If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected
as ``rxq`` is not correct in this case;
- If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+ If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are 64 pools in total (RTE_ETH_64_POOLS),
and each VF has 2 Rx queues;
- On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
- or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+ On host, to enable VF RSS functionality, rx mq mode should be set as RTE_ETH_MQ_RX_VMDQ_RSS
+ or RTE_ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
The VF RSS information, such as the hash function, RSS key and RSS key length, also needs to be configured.
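A minimal sketch of the corresponding host-side port configuration (the RSS key,
hash fields and multi-queue mode below are illustrative assumptions, not taken
from testpmd)::

    /* Device-specific RSS key, filled in at initialisation time. */
    static uint8_t vf_rss_key[40];

    static struct rte_eth_conf port_conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS, /* SR-IOV pools + RSS */
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = vf_rss_key,
                .rss_key_len = sizeof(vf_rss_key),
                .rss_hf = RTE_ETH_RSS_IP,
            },
        },
    };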
.. note::
To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
-* DEV_RX_OFFLOAD_VLAN_STRIP
+* RTE_ETH_RX_OFFLOAD_VLAN_STRIP
-* DEV_RX_OFFLOAD_VLAN_EXTEND
+* RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
-* DEV_RX_OFFLOAD_CHECKSUM
+* RTE_ETH_RX_OFFLOAD_CHECKSUM
-* DEV_RX_OFFLOAD_HEADER_SPLIT
+* RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
* dev_conf
~~~~~
When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
+In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
Otherwise, by default, RX vPMD is disabled.
load_balancer
~~~~~~~~~~~~~
-As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
+As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
- CRC:
- - ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
+ - ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
small-packet traffic.
When MPRQ is enabled, MTU can be larger than the size of
- user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
+ user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. PMD will
configure a stride size large enough to accommodate the MTU as long as the
device allows. Note that this can waste system memory compared to enabling Rx
scatter and multi-segment packet.
be added in next releases
TAP reports the supported RSS functions as part of the dev_infos_get callback:
-``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
**Known limitation:** TAP supports all of the above hash functions together
and not in partial combinations.
- the bit mask of required GSO types. The GSO library uses the same macros as
those that describe a physical device's TX offloading capabilities (i.e.
- ``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
+ ``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
wants to segment TCP/IPv4 packets, it should set gso_types to
- ``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
- supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
- ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+ ``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
+ supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+ ``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
allowed.
- a flag, that indicates whether the IPv4 headers of output segments should
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM
set out_ip checksum to 0 in the packet
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
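A hedged C rendering of this case (``mb`` and ``ipv4_hdr`` are illustrative
names; the flag spelling follows rte_mbuf.h)::

    /* Offload the outer IPv4 header checksum to the NIC. */
    mb->l2_len = sizeof(struct rte_ether_hdr);
    mb->l3_len = sizeof(struct rte_ipv4_hdr);
    mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    ipv4_hdr->hdr_checksum = 0; /* out_ip checksum set to 0 in the packet */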
- calculate checksum of out_ip and out_udp::
set out_ip checksum to 0 in the packet
set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
- and DEV_TX_OFFLOAD_UDP_CKSUM.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+ and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
- calculate checksum of in_ip::
set in_ip checksum to 0 in the packet
This is similar to case 1), but l2_len is different. It is supported
- on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
+ on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
Note that it can only work if outer L4 checksum is 0.
- calculate checksum of in_ip and in_tcp::
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
This is similar to case 2), but l2_len is different. It is supported
- on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
- DEV_TX_OFFLOAD_TCP_CKSUM.
+ on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
Note that it can only work if outer L4 checksum is 0.
- segment inner TCP::
set in_tcp checksum to pseudo header without including the IP
payload length using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
Note that it can only work if outer L4 checksum is 0.
- calculate checksum of out_ip, in_ip, in_tcp::
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
- DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+ This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
The list of flags and their precise meaning is described in the mbuf API
documentation (rte_mbuf.h). Also refer to the testpmd source code
Avoiding lock contention is a key issue in a multi-core environment.
To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
-If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
concurrently on the same Tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
* Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
* In the eventdev use case, avoid dedicating a separate TX core for transmitting and thus
enables more scaling as all workers can send the packets.
-See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
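A hedged sketch of that probing step (``port_id`` and ``port_conf`` are assumed
to be defined by the application)::

    struct rte_eth_dev_info dev_info;

    if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
        rte_exit(EXIT_FAILURE, "cannot get device info\n");

    if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE) {
        /* One Tx queue may safely be shared by several lcores. */
        port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
    }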
Device Identification, Ownership and Configuration
--------------------------------------------------
The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
Supported offloads can be either per-port or per-queue.
-Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
Any requested offloading by an application must be within the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
.. table:: RSS
- +---------------+---------------------------------------------+
- | Field | Value |
- +===============+=============================================+
- | ``func`` | RSS hash function to apply |
- +---------------+---------------------------------------------+
- | ``level`` | encapsulation level for ``types`` |
- +---------------+---------------------------------------------+
- | ``types`` | specific RSS hash types (see ``ETH_RSS_*``) |
- +---------------+---------------------------------------------+
- | ``key_len`` | hash key length in bytes |
- +---------------+---------------------------------------------+
- | ``queue_num`` | number of entries in ``queue`` |
- +---------------+---------------------------------------------+
- | ``key`` | hash key |
- +---------------+---------------------------------------------+
- | ``queue`` | queue indices to use |
- +---------------+---------------------------------------------+
+ +---------------+-------------------------------------------------+
+ | Field | Value |
+ +===============+=================================================+
+ | ``func`` | RSS hash function to apply |
+ +---------------+-------------------------------------------------+
+ | ``level`` | encapsulation level for ``types`` |
+ +---------------+-------------------------------------------------+
+ | ``types`` | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+ +---------------+-------------------------------------------------+
+ | ``key_len`` | hash key length in bytes |
+ +---------------+-------------------------------------------------+
+ | ``queue_num`` | number of entries in ``queue`` |
+ +---------------+-------------------------------------------------+
+ | ``key`` | hash key |
+ +---------------+-------------------------------------------------+
+ | ``queue`` | queue indices to use |
+ +---------------+-------------------------------------------------+
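A minimal sketch of how these fields might be filled (the queue indices and
hash types are illustrative assumptions)::

    /* Spread matching packets over two Rx queues using an IP-based hash. */
    static const uint16_t rss_queues[] = { 0, 1 };

    struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* let the PMD choose */
        .level = 0,                             /* outermost headers */
        .types = RTE_ETH_RSS_IP,
        .key_len = 0,                           /* keep the device default key */
        .key = NULL,
        .queue_num = RTE_DIM(rss_queues),
        .queue = rss_queues,
    };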
Action: ``PF``
^^^^^^^^^^^^^^
For Inline Crypto and Inline protocol offload, device specific defined metadata is
updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
-``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
+``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
For inline protocol offloaded ingress traffic, the application can register a
pointer, ``userdata``, in the security session. When the packet is received,
``RTE_ETH_FLOW_MAX`` is one sample of the mentioned case, adding a new flow
type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
usage in following public struct hierarchy:
- ``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
+ ``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
Need to identify this kind of usage and fix it in 20.11, otherwise this blocks
us from extending existing enums/defines.
One solution can be using a fixed size array instead of ``.*MAX.*`` value.
-* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
- Macros will be added for backward compatibility.
- Backward compatibility macros will be removed on v22.11.
- A few old backward compatibility macros from 2013 that does not have
- proper prefix will be removed on v21.11.
-
* ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
will be removed in DPDK 20.11.
-* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
+* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
This will allow an application to enable or disable PMD updates to
``rte_mbuf::hash::fdir``.
This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and
Also, make sure to start the actual text at the margin.
=======================================================
+* ethdev: All enums & macros updated to have ``RTE_ETH`` prefix and structures
+ updated to have ``rte_eth`` prefix. DPDK components updated to use new names.
+
* ethdev: Input parameters for ``eth_rx_queue_count_t`` was changed.
Instead of pointer to ``rte_eth_dev`` and queue index, now it accepts pointer
to internal queue data as input parameter. While this change is transparent
device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
* ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
- (bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
+ (bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
allows the user to disable some of the RX HW offload capabilities.
By default all HW RX offloads are enabled.
* ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
- (bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
+ (bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
allows the user to disable some of the TX HW offload capabilities.
By default all HW TX offloads are enabled.
Set the hexadecimal bitmask of RX multi queue mode which can be enabled.
The default value is 0x7::
- ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
+ RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
* ``--record-core-cycles``
struct usdpaa_ioctl_link_status_args_old {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
- /* link status(ETH_LINK_UP/DOWN) */
+ /* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
};
struct usdpaa_ioctl_link_status_args {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
- /* link status(ETH_LINK_UP/DOWN) */
+ /* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
- /* link speed (ETH_SPEED_NUM_)*/
+ /* link speed (RTE_ETH_SPEED_NUM_)*/
int link_speed;
- /* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+ /* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
int link_duplex;
- /* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
+ /* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
int link_autoneg;
};
struct usdpaa_ioctl_update_link_status_args {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
- /* link status(ETH_LINK_UP/DOWN) */
+ /* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
};
struct usdpaa_ioctl_update_link_speed {
/* network device node name*/
char if_name[IF_NAME_MAX_LEN];
- /* link speed (ETH_SPEED_NUM_)*/
+ /* link speed (RTE_ETH_SPEED_NUM_)*/
int link_speed;
- /* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
+ /* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
int link_duplex;
};
struct roc_npc_action_rss {
enum roc_npc_rss_hash_function func;
uint32_t level;
- uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
uint32_t key_len; /**< Hash key length in bytes. */
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */
};
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
internals->tx_queue[i].sockfd = -1;
}
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
struct pmd_internals *internals = dev->data->dev_private;
- internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
return 0;
}
};
static const struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG
};
/* List which tracks PMDs to facilitate sharing UMEMs across them. */
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
/* ARK PMD supports all line rates, how do we indicate that here ?? */
- dev_info->speed_capa = (ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G);
-
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G);
+
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return 0;
}
.remove = eth_atl_pci_remove,
};
-#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
- | DEV_RX_OFFLOAD_IPV4_CKSUM \
- | DEV_RX_OFFLOAD_UDP_CKSUM \
- | DEV_RX_OFFLOAD_TCP_CKSUM \
- | DEV_RX_OFFLOAD_MACSEC_STRIP \
- | DEV_RX_OFFLOAD_VLAN_FILTER)
-
-#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
- | DEV_TX_OFFLOAD_IPV4_CKSUM \
- | DEV_TX_OFFLOAD_UDP_CKSUM \
- | DEV_TX_OFFLOAD_TCP_CKSUM \
- | DEV_TX_OFFLOAD_TCP_TSO \
- | DEV_TX_OFFLOAD_MACSEC_INSERT \
- | DEV_TX_OFFLOAD_MULTI_SEGS)
+#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
+ | RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
+ | RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
+ | RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
+ | RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
+ | RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+
+#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
+ | RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
+ | RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
+ | RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
+ | RTE_ETH_TX_OFFLOAD_TCP_TSO \
+ | RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
+ | RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define SFP_EEPROM_SIZE 0x100
/* set adapter started */
hw->adapter_stopped = 0;
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(ERR,
"Invalid link_speeds for port %u, fix speed not supported",
dev->data->port_id);
uint32_t link_speeds = dev->data->dev_conf.link_speeds;
uint32_t speed_mask = 0;
- if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
speed_mask = hw->aq_nic_cfg->link_speed_msk;
} else {
- if (link_speeds & ETH_LINK_SPEED_10G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
speed_mask |= AQ_NIC_RATE_10G;
- if (link_speeds & ETH_LINK_SPEED_5G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_5G)
speed_mask |= AQ_NIC_RATE_5G;
- if (link_speeds & ETH_LINK_SPEED_1G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
speed_mask |= AQ_NIC_RATE_1G;
- if (link_speeds & ETH_LINK_SPEED_2_5G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed_mask |= AQ_NIC_RATE_2G5;
- if (link_speeds & ETH_LINK_SPEED_100M)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M)
speed_mask |= AQ_NIC_RATE_100M;
}
dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
- dev_info->speed_capa |= ETH_LINK_SPEED_100M;
- dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
- dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
return 0;
}
u32 fc = AQ_NIC_FC_OFF;
int err = 0;
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
link.link_speed = 0;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
memset(&old, 0, sizeof(old));
/* load old link status */
return 0;
}
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_speed = hw->aq_link_status.mbps;
rte_eth_linkstatus_set(dev, &link);
PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_DRV_LOG(INFO, " Port %d: Link Down",
hw->aq_fw_ops->get_flow_control(hw, &fc);
if (fc == AQ_NIC_FC_OFF)
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (fc & AQ_NIC_FC_RX)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (fc & AQ_NIC_FC_TX)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
return 0;
}
if (hw->aq_fw_ops->set_flow_control == NULL)
return -ENOTSUP;
- if (fc_conf->mode == RTE_FC_NONE)
+ if (fc_conf->mode == RTE_ETH_FC_NONE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
- else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
- else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
- else if (fc_conf->mode == RTE_FC_FULL)
+ else if (fc_conf->mode == RTE_ETH_FC_FULL)
hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
if (old_flow_control != hw->aq_nic_cfg->flow_control)
PMD_INIT_FUNC_TRACE();
- ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+ ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
- cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+ cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
for (i = 0; i < dev->data->nb_rx_queues; i++)
hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
- if (mask & ETH_VLAN_EXTEND_MASK)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK)
ret = -ENOTSUP;
return ret;
PMD_INIT_FUNC_TRACE();
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
+ case RTE_ETH_VLAN_TYPE_INNER:
hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
break;
- case ETH_VLAN_TYPE_OUTER:
+ case RTE_ETH_VLAN_TYPE_OUTER:
hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
break;
default:
#include "hw_atl/hw_atl_utils.h"
#define ATL_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define ATL_DEV_PRIVATE_TO_HW(adapter) \
(&((struct atl_adapter *)adapter)->hw)
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_IPV4_CKSUM;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
- (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
/* allocate memory for the software ring */
/* Setup required number of queues */
_avp_set_queue_counts(eth_dev);
- mask = (ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ mask = (RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
ret = avp_vlan_offload_set(eth_dev, mask);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_eth_link *link = &eth_dev->data->dev_link;
- link->link_speed = ETH_SPEED_NUM_10G;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_status = !!(avp->flags & AVP_F_LINKUP);
return -1;
dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
}
return 0;
struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
uint64_t offloads = dev_conf->rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
}
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
pdata->rss_hf = rss_conf->rss_hf;
rss_hf = rss_conf->rss_hf;
- if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
- if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
- if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}
struct axgbe_port *pdata = dev->data->dev_private;
/* Checksum offload to hardware */
pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CHECKSUM;
+ RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
}
{
struct axgbe_port *pdata = dev->data->dev_private;
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
pdata->rss_enable = 1;
- else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
pdata->rss_enable = 0;
else
return -1;
rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
max_pkt_len > pdata->rx_buf_size)
dev_data->scattered_rx = 1;
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
continue;
pdata->rss_table[i] = reta_conf[idx].reta[shift];
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
continue;
reta_conf[idx].reta[shift] = pdata->rss_table[i];
pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
- if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
if (pdata->rss_hf &
- (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
if (pdata->rss_hf &
- (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Set the RSS options */
link.link_status = pdata->phy_link;
link.link_speed = pdata->phy_speed;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == -1)
PMD_DRV_LOG(ERR, "No change in link status\n");
dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
- dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_KEEP_CRC;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
fc.autoneg = pdata->pause_autoneg;
if (pdata->rx_pause && pdata->tx_pause)
- fc.mode = RTE_FC_FULL;
+ fc.mode = RTE_ETH_FC_FULL;
else if (pdata->rx_pause)
- fc.mode = RTE_FC_RX_PAUSE;
+ fc.mode = RTE_ETH_FC_RX_PAUSE;
else if (pdata->tx_pause)
- fc.mode = RTE_FC_TX_PAUSE;
+ fc.mode = RTE_ETH_FC_TX_PAUSE;
else
- fc.mode = RTE_FC_NONE;
+ fc.mode = RTE_ETH_FC_NONE;
fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
AXGMAC_IOWRITE(pdata, reg, reg_val);
fc.mode = fc_conf->mode;
- if (fc.mode == RTE_FC_FULL) {
+ if (fc.mode == RTE_ETH_FC_FULL) {
pdata->tx_pause = 1;
pdata->rx_pause = 1;
- } else if (fc.mode == RTE_FC_RX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
pdata->tx_pause = 0;
pdata->rx_pause = 1;
- } else if (fc.mode == RTE_FC_TX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
pdata->tx_pause = 1;
pdata->rx_pause = 0;
} else {
fc.mode = pfc_conf->fc.mode;
- if (fc.mode == RTE_FC_FULL) {
+ if (fc.mode == RTE_ETH_FC_FULL) {
pdata->tx_pause = 1;
pdata->rx_pause = 1;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
- } else if (fc.mode == RTE_FC_RX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
pdata->tx_pause = 0;
pdata->rx_pause = 1;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
- } else if (fc.mode == RTE_FC_TX_PAUSE) {
+ } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
pdata->tx_pause = 1;
pdata->rx_pause = 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
- PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+ case RTE_ETH_VLAN_TYPE_INNER:
+ PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
if (qinq) {
if (tpid != 0x8100 && tpid != 0x88a8)
PMD_DRV_LOG(ERR,
"Inner type not supported in single tag\n");
}
break;
- case ETH_VLAN_TYPE_OUTER:
- PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+ case RTE_ETH_VLAN_TYPE_OUTER:
+ PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
if (qinq) {
PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
/*Enable outer VLAN tag*/
"tag supported 0x8100/0x88A8\n");
}
break;
- case ETH_VLAN_TYPE_MAX:
- PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+ case RTE_ETH_VLAN_TYPE_MAX:
+ PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
break;
- case ETH_VLAN_TYPE_UNKNOWN:
- PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+ case RTE_ETH_VLAN_TYPE_UNKNOWN:
+ PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
break;
}
return 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_stripping(pdata);
pdata->hw_if.disable_rx_vlan_stripping(pdata);
}
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_filtering(pdata);
pdata->hw_if.disable_rx_vlan_filtering(pdata);
}
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
axgbe_vlan_extend_enable(pdata);
/* Set global registers with default ethertype*/
- axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
- axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
} else {
PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
/* Receive Side Scaling */
#define AXGBE_RSS_OFFLOAD ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define AXGBE_RSS_HASH_KEY_SIZE 40
#define AXGBE_RSS_MAX_TABLE_SIZE 256
pdata->an_int = 0;
axgbe_an73_clear_interrupts(pdata);
pdata->eth_dev->data->dev_link.link_status =
- ETH_LINK_DOWN;
+ RTE_ETH_LINK_DOWN;
} else if (pdata->an_state == AXGBE_AN_ERROR) {
PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
cur_state);
(DMA_CH_INC * rxq->queue_id));
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
}
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
link.link_status = sc->link_vars.link_up;
return rte_eth_linkstatus_set(dev, &link);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
"VF device is no longer operational");
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
}
return ret;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
- dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
bnx2x_load_firmware(sc);
assert(sc->firmware);
- if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
sc->udp_rss = 1;
sc->rx_budget = BNX2X_RX_BUDGET;
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
#define BNXT_ETH_RSS_SUPPORT ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_LEVEL_MASK)
-
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | \
- DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_KEEP_CRC | \
- DEV_RX_OFFLOAD_VLAN_EXTEND | \
- DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_LEVEL_MASK)
+
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+ RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
goto err_out;
/* Alloc RSS context only if RSS mode is enabled */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
/* RSS table size in Thor is 512.
* setting is not available at this time, it will not be
* configured correctly in the CFA.
*/
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
- (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
true : false);
if (rc)
goto err_out;
link_speed = bp->link_info->support_pam4_speeds;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
- speed_capa |= ETH_LINK_SPEED_100M;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
- speed_capa |= ETH_LINK_SPEED_100M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
- speed_capa |= ETH_LINK_SPEED_2_5G;
+ speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
- speed_capa |= ETH_LINK_SPEED_10G;
+ speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
- speed_capa |= ETH_LINK_SPEED_20G;
+ speed_capa |= RTE_ETH_LINK_SPEED_20G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
- speed_capa |= ETH_LINK_SPEED_25G;
+ speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
- speed_capa |= ETH_LINK_SPEED_40G;
+ speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
- speed_capa |= ETH_LINK_SPEED_200G;
+ speed_capa |= RTE_ETH_LINK_SPEED_200G;
if (bp->link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
- speed_capa |= ETH_LINK_SPEED_FIXED;
+ speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
return speed_capa;
}
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
dev_info->tx_queue_offload_capa;
if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
*/
/* VMDq resources */
- vpool = 64; /* ETH_64_POOLS */
- vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+ vpool = 64; /* RTE_ETH_64_POOLS */
+ vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
for (i = 0; i < 4; vpool >>= 1, i++) {
if (max_vnics > vpool) {
for (j = 0; j < 5; vrxq >>= 1, j++) {
(uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
goto resource_error;
- if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
bp->max_vnics < eth_dev->data->nb_rx_queues)
goto resource_error;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
- (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
PMD_DRV_LOG(INFO, "Port %d Link Down\n",
uint16_t buf_size;
int i;
- if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return 1;
- if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
return 1;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
* a limited subset have been enabled.
*/
if (eth_dev->data->dev_conf.rxmode.offloads &
- ~(DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_VLAN_FILTER))
+ ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
goto use_scalar_rx;
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
* or tx offloads.
*/
if (eth_dev->data->scattered_rx ||
- (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+ (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
BNXT_TRUFLOW_EN(bp))
goto use_scalar_tx;
bnxt_link_update_op(eth_dev, 1);
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- vlan_mask |= ETH_VLAN_FILTER_MASK;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- vlan_mask |= ETH_VLAN_STRIP_MASK;
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
if (rc)
goto error;
/* Retrieve link info from hardware */
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- new.link_speed = ETH_LINK_SPEED_100M;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new.link_speed = RTE_ETH_LINK_SPEED_100M;
+ new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
if (!vnic->rss_table)
return -EINVAL;
- if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
if (reta_size != tbl_size) {
for (i = 0; i < reta_size; i++) {
struct bnxt_rx_queue *rxq;
- idx = i / RTE_RETA_GROUP_SIZE;
- sft = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << sft)))
continue;
}
for (idx = 0, i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- sft = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
 * If RSS enablement is different from what was set in dev_configure,
* then return -EINVAL
*/
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
vnic->hash_mode =
bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
- ETH_RSS_LEVEL(rss_conf->rss_hf));
+ RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
/*
* If hashkey is not specified, use the previously configured
hash_types = vnic->hash_type;
rss_conf->rss_hf = 0;
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_IPV4;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_IPV6;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
fc_conf->autoneg = 1;
switch (bp->link_info->pause) {
case 0:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
}
return 0;
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
bp->link_info->auto_pause = 0;
bp->link_info->force_pause = 0;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
}
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
}
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
return rc;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
tunnel_type =
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
return rc;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
port = bp->vxlan_fw_dst_port_id;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
int rc;
vnic = BNXT_GET_DEFAULT_VNIC(bp);
- if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
bnxt_add_vlan_filter(bp, 0);
}
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
- !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
+ !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
return 0;
}
/* Destroy vnic filters and vnic */
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
}
return rc;
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
rc = bnxt_add_vlan_filter(bp, 0);
if (rc)
return rc;
return rc;
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
- !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
+ !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
return rc;
}
if (!dev->data->dev_started)
return 0;
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering */
rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
if (rc)
return rc;
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
if (rc)
return rc;
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
else
PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
{
struct bnxt *bp = dev->data->dev_private;
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
- if (vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
return -EINVAL;
}
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
switch (tpid) {
case RTE_ETHER_TYPE_QINQ:
bp->outer_tpid_bd =
}
bp->outer_tpid_bd |= tpid;
PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
- } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
PMD_DRV_LOG(ERR,
"Can accelerate only outer vlan in QinQ\n");
return -EINVAL;
bnxt_del_dflt_mac_filter(bp, vnic);
memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
/* This filter will allow only untagged packets */
rc = bnxt_add_vlan_filter(bp, 0);
} else {
RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
}
}
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
}
/* If RSS types is 0, use a best effort configuration */
- types = rss->types ? rss->types : ETH_RSS_IPV4;
+ types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
hash_type = bnxt_rte_to_hwrm_hash_types(types);
rxq = bp->rx_queues[act_q->index];
- if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
+ if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
vnic->fw_vnic_id != INVALID_HW_RING_ID)
goto use_vnic;
uint16_t j = dst_id - 1;
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
- if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+ if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
{
uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
- if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
switch (conf_link_speed) {
- case ETH_LINK_SPEED_10M_HD:
- case ETH_LINK_SPEED_100M_HD:
+ case RTE_ETH_LINK_SPEED_10M_HD:
+ case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
{
uint16_t eth_link_speed = 0;
- if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
- return ETH_LINK_SPEED_AUTONEG;
+ if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
+ return RTE_ETH_LINK_SPEED_AUTONEG;
- switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_100M:
- case ETH_LINK_SPEED_100M_HD:
+ switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_100M:
+ case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
- case ETH_LINK_SPEED_1G:
+ case RTE_ETH_LINK_SPEED_1G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
break;
- case ETH_LINK_SPEED_2_5G:
+ case RTE_ETH_LINK_SPEED_2_5G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
break;
- case ETH_LINK_SPEED_10G:
+ case RTE_ETH_LINK_SPEED_10G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
- case ETH_LINK_SPEED_20G:
+ case RTE_ETH_LINK_SPEED_20G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
break;
- case ETH_LINK_SPEED_25G:
+ case RTE_ETH_LINK_SPEED_25G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
break;
- case ETH_LINK_SPEED_40G:
+ case RTE_ETH_LINK_SPEED_40G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
- case ETH_LINK_SPEED_50G:
+ case RTE_ETH_LINK_SPEED_50G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
- case ETH_LINK_SPEED_100G:
+ case RTE_ETH_LINK_SPEED_100G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
- case ETH_LINK_SPEED_200G:
+ case RTE_ETH_LINK_SPEED_200G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
break;
return eth_link_speed;
}
-#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
- ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
- ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
+#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
+ RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
+ RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
+ RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
static int bnxt_validate_link_speed(struct bnxt *bp)
{
uint32_t link_speed_capa;
uint32_t one_speed;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
return 0;
link_speed_capa = bnxt_get_speed_capabilities(bp);
- if (link_speed & ETH_LINK_SPEED_FIXED) {
- one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+ if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
+ one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
PMD_DRV_LOG(ERR,
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
if (bp->link_info->support_speeds)
return bp->link_info->support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
}
- if (link_speed & ETH_LINK_SPEED_100M)
+ if (link_speed & RTE_ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
- if (link_speed & ETH_LINK_SPEED_100M_HD)
+ if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
- if (link_speed & ETH_LINK_SPEED_1G)
+ if (link_speed & RTE_ETH_LINK_SPEED_1G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
- if (link_speed & ETH_LINK_SPEED_2_5G)
+ if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
- if (link_speed & ETH_LINK_SPEED_10G)
+ if (link_speed & RTE_ETH_LINK_SPEED_10G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
- if (link_speed & ETH_LINK_SPEED_20G)
+ if (link_speed & RTE_ETH_LINK_SPEED_20G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
- if (link_speed & ETH_LINK_SPEED_25G)
+ if (link_speed & RTE_ETH_LINK_SPEED_25G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
- if (link_speed & ETH_LINK_SPEED_40G)
+ if (link_speed & RTE_ETH_LINK_SPEED_40G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
- if (link_speed & ETH_LINK_SPEED_50G)
+ if (link_speed & RTE_ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
- if (link_speed & ETH_LINK_SPEED_100G)
+ if (link_speed & RTE_ETH_LINK_SPEED_100G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
- if (link_speed & ETH_LINK_SPEED_200G)
+ if (link_speed & RTE_ETH_LINK_SPEED_200G)
ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
- uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+ uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
switch (hw_link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
- eth_link_speed = ETH_SPEED_NUM_100M;
+ eth_link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
- eth_link_speed = ETH_SPEED_NUM_1G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
- eth_link_speed = ETH_SPEED_NUM_2_5G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
- eth_link_speed = ETH_SPEED_NUM_10G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
- eth_link_speed = ETH_SPEED_NUM_20G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
- eth_link_speed = ETH_SPEED_NUM_25G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
- eth_link_speed = ETH_SPEED_NUM_40G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
- eth_link_speed = ETH_SPEED_NUM_50G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
- eth_link_speed = ETH_SPEED_NUM_100G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_100G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
- eth_link_speed = ETH_SPEED_NUM_200G;
+ eth_link_speed = RTE_ETH_SPEED_NUM_200G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
- uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
/* FALLTHROUGH */
- eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
- eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+ eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
default:
PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
- link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+ RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
exit:
return rc;
}
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
if (BNXT_CHIP_P5(bp) &&
- dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+ dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed should be forced and autoneg disabled
* to configure 40G speed.
HWRM_CHECK_RESULT();
- bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
svif_info = rte_le_to_cpu_16(resp->svif_info);
if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
rx_ring_info->rx_ring_struct->ring_size *
AGG_RING_SIZE_FACTOR)) : 0;
- if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
int tpa_max = BNXT_TPA_MAX_AGGS(bp);
tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
ag_bitmap_start, ag_bitmap_len);
/* TPA info */
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rx_ring_info->tpa_info =
((struct bnxt_tpa_info *)
((char *)mz->addr + tpa_info_start));
bp->nr_vnics = 0;
/* Multi-queue mode */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_RSS:
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
max_pools = RTE_MIN(bp->max_vnics,
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
- ETH_64_POOLS)));
+ RTE_ETH_64_POOLS)));
PMD_DRV_LOG(DEBUG,
"pools = %u max_pools = %u\n",
pools, max_pools);
if (pools > max_pools)
pools = max_pools;
break;
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
ring_idx, rxq, i, vnic);
}
if (i == 0) {
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
bp->eth_dev->data->promiscuous = 1;
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
}
vnic->end_grp_id = end_grp_id;
if (i) {
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
- !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+ !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
vnic->rss_dflt_cr = true;
goto skip_filter_allocation;
}
bp->rx_num_qs_per_vnic = nb_q_per_grp;
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
if (bp->flags & BNXT_FLAG_UPDATE_HASH)
bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
for (i = 0; i < bp->nr_vnics; i++) {
- uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
+ uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
vnic = &bp->vnic_info[i];
vnic->hash_type =
PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
}
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
if (BNXT_HAS_RING_GRPS(bp)) {
rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
dev_conf = &rxq->bp->eth_dev->data->dev_conf;
offloads = dev_conf->rxmode.offloads;
- outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
+ outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
/* Initialize ol_flags table. */
pt = rxr->ol_flags_table;
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
}
/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static inline void
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
}
/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp(txq, nb_tx_pkts);
{
uint16_t hwrm_type = 0;
- if (rte_type & ETH_RSS_IPV4)
+ if (rte_type & RTE_ETH_RSS_IPV4)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
- if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
- if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
- if (rte_type & ETH_RSS_IPV6)
+ if (rte_type & RTE_ETH_RSS_IPV6)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
- if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
- if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
return hwrm_type;
int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
{
uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
- bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
- bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP));
+ bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
+ bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP));
bool l3_only = l3 && !l4;
bool l3_and_l4 = l3 && l4;
* return default hash mode.
*/
if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
- return ETH_RSS_LEVEL_PMD_DEFAULT;
+ return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
- rss_level |= ETH_RSS_LEVEL_OUTERMOST;
+ rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
- rss_level |= ETH_RSS_LEVEL_INNERMOST;
+ rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
else
- rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
+ rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
return rss_level;
}
if (vf >= bp->pdev->max_vfs)
return -EINVAL;
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
/* Is this really the correct mapping? VFd seems to think it is. */
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
flag |= BNXT_VNIC_INFO_PROMISC;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
flag |= BNXT_VNIC_INFO_BCAST;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
if (on)
struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */
uint16_t reta_size;
- struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
- RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
+ RTE_ETH_RETA_GROUP_SIZE];
uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
uint8_t rss_key_len; /**< hash key length in bytes. */
uint16_t key_speed;
switch (speed) {
- case ETH_SPEED_NUM_NONE:
+ case RTE_ETH_SPEED_NUM_NONE:
key_speed = 0x00;
break;
- case ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_10M:
key_speed = BOND_LINK_SPEED_KEY_10M;
break;
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
key_speed = BOND_LINK_SPEED_KEY_100M;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
key_speed = BOND_LINK_SPEED_KEY_1000M;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
key_speed = BOND_LINK_SPEED_KEY_10G;
break;
- case ETH_SPEED_NUM_20G:
+ case RTE_ETH_SPEED_NUM_20G:
key_speed = BOND_LINK_SPEED_KEY_20G;
break;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
key_speed = BOND_LINK_SPEED_KEY_40G;
break;
default:
if (ret >= 0 && link_info.link_status != 0) {
key = link_speed_key(link_info.link_speed) << 1;
- if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+ if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
key |= BOND_LINK_FULL_DUPLEX_KEY;
} else {
key = 0;
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
return 0;
internals = bonded_eth_dev->data->dev_private;
return -1;
}
- if (link_props.link_status == ETH_LINK_UP) {
+ if (link_props.link_status == RTE_ETH_LINK_UP) {
if (internals->active_slave_count == 0 &&
!internals->user_defined_primary_port)
bond_ethdev_primary_set(internals,
internals->tx_offload_capa = 0;
internals->rx_queue_offload_capa = 0;
internals->tx_queue_offload_capa = 0;
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
internals->reta_size = 0;
internals->candidate_max_rx_pktlen = 0;
internals->max_rx_pktlen = 0;
* In any other mode the link properties are set to default
* values of AUTONEG/DUPLEX
*/
- ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
- ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+ ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
}
}
slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
/* If RSS is enabled for bonding, try to enable it for slaves */
- if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
/* rss_key won't be empty if RSS is configured in bonded dev */
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
internals->rss_key_len;
}
if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
slave_eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
else
slave_eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
}
/* If RSS is enabled for bonding, synchronize RETA */
- if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int i;
struct bond_dev_private *internals;
return -1;
}
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 1;
internals = eth_dev->data->dev_private;
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
internals->link_status_polling_enabled = 0;
bond_ctx = ethdev->data->dev_private;
- ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
if (ethdev->data->dev_started == 0 ||
bond_ctx->active_slave_count == 0) {
- ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+ ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
- ethdev->data->dev_link.link_status = ETH_LINK_UP;
+ ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
if (wait_to_complete)
link_update = rte_eth_link_get;
&slave_link);
if (ret < 0) {
ethdev->data->dev_link.link_speed =
- ETH_SPEED_NUM_NONE;
+ RTE_ETH_SPEED_NUM_NONE;
RTE_BOND_LOG(ERR,
"Slave (port %u) link get failed: %s",
bond_ctx->active_slaves[idx],
 * In these modes the maximum theoretical link speed is the sum
* of all the slaves
*/
- ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
one_link_update_succeeded = false;
for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
goto link_update;
/* check link state properties if bonded link is up*/
- if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
if (link_properties_valid(bonded_eth_dev, &link) != 0)
RTE_BOND_LOG(ERR, "Invalid link properties "
"for slave %d in bonding mode %d",
if (internals->active_slave_count < 1) {
/* If first active slave, then change link status */
bonded_eth_dev->data->dev_link.link_status =
- ETH_LINK_UP;
+ RTE_ETH_LINK_UP;
internals->current_primary_port = port_id;
lsc_flag = 1;
return -EINVAL;
/* Copy RETA table */
- reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
- RTE_RETA_GROUP_SIZE;
+ reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+ RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < reta_count; i++) {
internals->reta_conf[i].mask = reta_conf[i].mask;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
}
return -EINVAL;
/* Copy RETA table */
- for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
internals->max_rx_pktlen = 0;
/* Initially allow to choose any offload type */
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
memset(&internals->default_rxconf, 0,
sizeof(internals->default_rxconf));
 * set key to the value specified in port RSS configuration.
* Fall back to default RSS key if the key is not specified
*/
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
struct rte_eth_rss_conf *rss_conf =
&dev->data->dev_conf.rx_adv_conf.rss_conf;
if (rss_conf->rss_key != NULL) {
for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
internals->reta_conf[i].mask = ~0LL;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
internals->reta_conf[i].reta[j] =
- (i * RTE_RETA_GROUP_SIZE + j) %
+ (i * RTE_ETH_RETA_GROUP_SIZE + j) %
dev->data->nb_rx_queues;
}
}
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
- (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
- if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
- conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
- if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
- if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
- conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
- if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
- if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
- if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
- if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
- if (conf & DEV_TX_OFFLOAD_SECURITY)
+ if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;
return -EINVAL;
}
- if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("multi-queue mode is disabled");
return -ENOTSUP;
}
nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
if (dev->scalar_ena) {
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_burst);
}
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
}
if (dev->scalar_ena) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
} else {
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
- (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
- if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
- conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
- if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
- if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
- conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
- if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
- if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
- if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
- if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;
/* Platform specific checks */
if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
- (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
- ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+ ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
plt_err("Outer IP and SCTP checksum unsupported");
return -EINVAL;
}
* TSO not supported for earlier chip revisions
*/
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
- dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
/* 50G and 100G to be supported for board version C0
* and above of CN9K.
*/
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
- dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
- dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
+ dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
+ dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
}
dev->hwcap = 0;
nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
if (dev->scalar_ena) {
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_burst);
}
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
}
if (dev->scalar_ena) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
} else {
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
if (roc_nix_is_vf_or_sdp(&dev->nix) ||
dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
- capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+ capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return capa;
}
uint32_t speed_capa;
/* Auto negotiation disabled */
- speed_capa = ETH_LINK_SPEED_FIXED;
+ speed_capa = RTE_ETH_LINK_SPEED_FIXED;
if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
- speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
}
return speed_capa;
struct roc_nix *nix = &dev->nix;
int i, rc = 0;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Setup Inline Inbound */
rc = roc_nix_inl_inb_init(nix);
if (rc) {
cnxk_nix_inb_mode_set(dev, true);
}
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
- dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
struct plt_bitmap *bmap;
size_t bmap_sz;
void *mem;
dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
- /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
- if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+ /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
goto done;
rc = -ENOMEM;
done:
return 0;
cleanup:
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
rc |= roc_nix_inl_inb_fini(nix);
return rc;
}
int rc, ret = 0;
/* Cleanup Inline inbound */
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Destroy inbound sessions */
tvar = NULL;
RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
}
/* Cleanup Inline outbound */
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
- dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Destroy outbound sessions */
tvar = NULL;
RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
- dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
- dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
}
struct rte_eth_fc_conf fc_conf = {0};
int rc;
- /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ /* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
* by AF driver, update those info in PMD structure.
*/
rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
goto exit;
fc->mode = fc_conf.mode;
- fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_RX_PAUSE);
- fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_TX_PAUSE);
+ fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
exit:
return rc;
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
if (roc_model_is_cn96_ax() &&
dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
- (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+ (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
fc_cfg.mode =
- (fc_cfg.mode == RTE_FC_FULL ||
- fc_cfg.mode == RTE_FC_TX_PAUSE) ?
- RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ (fc_cfg.mode == RTE_ETH_FC_FULL ||
+ fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+ RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
}
return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
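For reference, these are the same modes an application drives through the public ethdev flow-control API; a minimal caller-side sketch, assuming an already started port identified by `port_id`:

#include <string.h>
#include <rte_ethdev.h>

static void set_tx_pause_only(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) != 0)
		return;
	/* Transmit pause frames on local Rx congestion, ignore received ones. */
	fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}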
* Maximum three segments can be supported with W8, Choose
* NIX_MAXSQESZ_W16 for multi segment offload.
*/
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
return NIX_MAXSQESZ_W16;
else
return NIX_MAXSQESZ_W8;
/* When Tx Security offload is enabled, increase tx desc count by
* max possible outbound desc count.
*/
- if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
nb_desc += dev->outb.nb_desc;
/* Setup ROC SQ */
* to avoid meta packet drop as LBK does not currently support
* backpressure.
*/
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
/* Use current RQ's aura limit if inl rq is not available */
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Setup rq reference for inline dev if present */
rc = roc_nix_inl_dev_rq_get(rq);
if (rc)
* These are needed in deriving raw clock value from tsc counter.
* read_clock eth op returns raw clock value.
*/
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
rc = cnxk_nix_tsc_convert(dev);
if (rc) {
plt_err("Failed to calculate delta and freq mult");
plt_nix_dbg("Releasing rxq %u", qid);
/* Release rq reference for inline dev if present */
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
roc_nix_inl_dev_rq_put(rq);
/* Cleanup ROC RQ */
dev->ethdev_rss_hf = ethdev_rss;
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
}
- if (ethdev_rss & ETH_RSS_C_VLAN)
+ if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
- if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
- if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
- if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
- if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
if (ethdev_rss & RSS_IPV4_ENABLE)
if (ethdev_rss & RSS_IPV6_ENABLE)
flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
- if (ethdev_rss & ETH_RSS_TCP)
+ if (ethdev_rss & RTE_ETH_RSS_TCP)
flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
- if (ethdev_rss & ETH_RSS_UDP)
+ if (ethdev_rss & RTE_ETH_RSS_UDP)
flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
- if (ethdev_rss & ETH_RSS_SCTP)
+ if (ethdev_rss & RTE_ETH_RSS_SCTP)
flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
if (ethdev_rss & RSS_IPV6_EX_ENABLE)
flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
- if (ethdev_rss & ETH_RSS_PORT)
+ if (ethdev_rss & RTE_ETH_RSS_PORT)
flowkey_cfg |= FLOW_KEY_TYPE_PORT;
- if (ethdev_rss & ETH_RSS_NVGRE)
+ if (ethdev_rss & RTE_ETH_RSS_NVGRE)
flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
- if (ethdev_rss & ETH_RSS_VXLAN)
+ if (ethdev_rss & RTE_ETH_RSS_VXLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
- if (ethdev_rss & ETH_RSS_GENEVE)
+ if (ethdev_rss & RTE_ETH_RSS_GENEVE)
flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
- if (ethdev_rss & ETH_RSS_GTPU)
+ if (ethdev_rss & RTE_ETH_RSS_GTPU)
flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
return flowkey_cfg;
uint64_t rss_hf;
rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
- rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
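The level decoded here travels in the upper bits of `rss_hf` itself; a hedged application-side sketch of requesting inner-header RSS (the surrounding setup, including `port_id`, is assumed):

#include <rte_ethdev.h>

static int request_inner_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
			  RTE_ETH_RSS_LEVEL_INNERMOST,
	};

	/* RTE_ETH_RSS_LEVEL() recovers the level field from rss_hf:
	 * 0 = PMD default, 1 = outermost, 2 = innermost headers.
	 */
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}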
/* Nothing much to do if offload is not enabled */
if (!(dev->tx_offloads &
- (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+ (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
return 0;
/* Setup LSO formats in AF. It's a no-op if other ethdev has
goto fail_configure;
}
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
goto fail_configure;
}
- if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
goto fail_configure;
}
/* Prepare rx cfg */
rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
if (dev->rx_offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
}
ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
/* Disable drop re if rx offload security is enabled and
* platform does not support it.
* enabled on PF owning this VF
*/
memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
cnxk_eth_dev_ops.timesync_enable(eth_dev);
else
cnxk_eth_dev_ops.timesync_disable(eth_dev);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
rc = rte_mbuf_dyn_rx_timestamp_register
(&dev->tstamp.tstamp_dynfield_offset,
&dev->tstamp.rx_tstamp_dynflag);
CNXK_NIX_TX_NB_SEG_MAX)
#define CNXK_NIX_RSS_L3_L4_SRC_DST \
- (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY)
+ (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
#define CNXK_NIX_RSS_OFFLOAD \
- (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
- ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
- CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
+ (RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | \
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL | \
+ RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST | \
+ RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
#define CNXK_NIX_TX_OFFLOAD_CAPA \
- (DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE | \
- DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
+ (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
#define CNXK_NIX_RX_OFFLOAD_CAPA \
- (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | \
- DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_SECURITY)
+ (RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH | \
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_SECURITY)
#define RSS_IPV4_ENABLE \
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
#define RSS_IPV6_ENABLE \
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
#define RSS_IPV6_EX_ENABLE \
- (ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+ (RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
#define RSS_MAX_LEVELS 3
val = atoi(value);
- if (val <= ETH_RSS_RETA_SIZE_64)
+ if (val <= RTE_ETH_RSS_RETA_SIZE_64)
val = ROC_NIX_RSS_RETA_SZ_64;
- else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+ else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
val = ROC_NIX_RSS_RETA_SZ_128;
- else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+ else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
val = ROC_NIX_RSS_RETA_SZ_256;
else
val = ROC_NIX_RSS_RETA_SZ_64;
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
- {DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
- {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
- {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
- {DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
- {DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
- {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
- {DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
- {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
- {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
- {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
- {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
- {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
- {DEV_RX_OFFLOAD_SECURITY, " Security,"},
- {DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
- {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
- {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
- {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+ {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+ {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+ {RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+ {RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+ {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+ {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+ {RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+ {RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+ {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
};
static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
"Scalar, Rx Offloads:"
uint64_t flags;
const char *output;
} tx_offload_map[] = {
- {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
- {DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
- {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
- {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
- {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
- {DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
- {DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
- {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
- {DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
- {DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
- {DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
- {DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
- {DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
- {DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
- {DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
- {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
- {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
- {DEV_TX_OFFLOAD_SECURITY, " Security,"},
- {DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
- {DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
- {DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
- {DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+ {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+ {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+ {RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+ {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+ {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+ {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+ {RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+ {RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
};
static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
"Scalar, Tx Offloads:"
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
enum rte_eth_fc_mode mode_map[] = {
- RTE_FC_NONE, RTE_FC_RX_PAUSE,
- RTE_FC_TX_PAUSE, RTE_FC_FULL
+ RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+ RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
};
struct roc_nix *nix = &dev->nix;
int mode;
if (fc_conf->mode == fc->mode)
return 0;
- rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_RX_PAUSE);
- tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_TX_PAUSE);
+ rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
/* Check if TX pause frame is already enabled or not */
if (fc->tx_pause ^ tx_pause) {
* when this feature has not been enabled before.
*/
if (data->dev_started && frame_size > buffsz &&
- !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
plt_err("Scatter offload is not enabled for mtu");
goto exit;
}
/* Check <seg size> * <max_seg> >= max_frame */
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
plt_err("Greater than maximum supported packet length");
goto exit;
}
/* Copy RETA table */
- for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
reta[idx] = reta_conf[i].reta[j];
idx++;
goto fail;
/* Copy RETA table */
- for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = reta[idx];
idx++;
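The groups copied above mirror what a caller builds before invoking the RETA API; a minimal sketch using the renamed group-size constant, assuming a 64-entry table spread over `nb_rxq` configured queues on `port_id`:

#include <string.h>
#include <rte_ethdev.h>

static int spread_reta(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64 reta_conf[64 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < 64; i++) {
		/* Each 64-entry group carries its own validity mask. */
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
			i % nb_rxq;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, 64);
}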
if (rss_conf->rss_key)
roc_nix_rss_key_set(nix, rss_conf->rss_key);
- rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
flowkey_cfg =
plt_info("Port %d: Link Up - speed %u Mbps - %s",
(int)(eth_dev->data->port_id),
(uint32_t)link->link_speed,
- link->link_duplex == ETH_LINK_FULL_DUPLEX
+ link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
? "full-duplex"
: "half-duplex");
else
eth_link.link_status = link->status;
eth_link.link_speed = link->speed;
- eth_link.link_autoneg = ETH_LINK_AUTONEG;
+ eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
eth_link.link_duplex = link->full_duplex;
/* Print link info */
return 0;
if (roc_nix_is_lbk(&dev->nix)) {
- link.link_status = ETH_LINK_UP;
- link.link_speed = ETH_SPEED_NUM_100G;
- link.link_autoneg = ETH_LINK_FIXED;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else {
rc = roc_nix_mac_link_info_get(&dev->nix, &info);
if (rc)
return rc;
link.link_status = info.status;
link.link_speed = info.speed;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
if (info.full_duplex)
link.link_duplex = info.full_duplex;
}
dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
- dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
rc = roc_nix_ptp_rx_ena_dis(nix, true);
if (!rc) {
cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
+ uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
struct roc_nix *nix = &dev->nix;
int rc = 0;
return -EINVAL;
}
- if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("multi-queue mode is disabled");
return -ENOTSUP;
}
#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
-#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_OTHER)
-#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_IPV6_EX)
-#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX)
-#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
/* Tx/Rx Offloads supported */
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
/* Devargs filtermode and filtermask representation */
enum cxgbe_devargs_filter_mode_flags {
}
new_link.link_status = cxgbe_force_linkup(adapter) ?
- ETH_LINK_UP : pi->link_cfg.link_ok;
+ RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
return rte_eth_linkstatus_set(eth_dev, &new_link);
goto out;
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
else
eth_dev->data->scattered_rx = 0;
CXGBE_FUNC_TRACE();
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = cxgbe_setup_sge_fwevtq(adapter);
rx_pause = 1;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
u8 tx_pause = 0, rx_pause = 0;
int ret;
- if (fc_conf->mode == RTE_FC_FULL) {
+ if (fc_conf->mode == RTE_ETH_FC_FULL) {
tx_pause = 1;
rx_pause = 1;
- } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+ } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
tx_pause = 1;
- } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+ } else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
rx_pause = 1;
}
rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
}
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
return -EINVAL;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_100G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
}
if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_50G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
}
if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_25G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
* that step explicitly.
*/
ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
- !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+ !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
true);
if (ret == 0) {
ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
}
if (ret == 0 && cxgbe_force_linkup(adapter))
- pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return ret;
}
if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_UDPEN;
{
#define SET_SPEED(__speed_name) \
do { \
- *speed_caps |= ETH_LINK_ ## __speed_name; \
+ *speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
} while (0)
#define FW_CAPS_TO_SPEED(__fw_name) \
speed_caps);
if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
- *speed_caps |= ETH_LINK_SPEED_FIXED;
+ *speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
}
/**
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_SCATTER;
+ RTE_ETH_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
- DEV_TX_OFFLOAD_MT_LOCKFREE |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
- if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
DPAA_PMD_DEBUG("enabling scatter mode");
fman_if_set_sg(dev->process_private, 1);
dev->data->scattered_rx = 1;
/* Configure link only if link is UP*/
if (link->link_status) {
- if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
/* Start autoneg only if link is not in autoneg mode */
if (!link->link_autoneg)
dpaa_restart_link_autoneg(__fif->node_name);
- } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
- switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_10M_HD:
- speed = ETH_SPEED_NUM_10M;
- duplex = ETH_LINK_HALF_DUPLEX;
+ } else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+ switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_10M_HD:
+ speed = RTE_ETH_SPEED_NUM_10M;
+ duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
- case ETH_LINK_SPEED_10M:
- speed = ETH_SPEED_NUM_10M;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_10M:
+ speed = RTE_ETH_SPEED_NUM_10M;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_100M_HD:
- speed = ETH_SPEED_NUM_100M;
- duplex = ETH_LINK_HALF_DUPLEX;
+ case RTE_ETH_LINK_SPEED_100M_HD:
+ speed = RTE_ETH_SPEED_NUM_100M;
+ duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
- case ETH_LINK_SPEED_100M:
- speed = ETH_SPEED_NUM_100M;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_100M:
+ speed = RTE_ETH_SPEED_NUM_100M;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_1G:
- speed = ETH_SPEED_NUM_1G;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_1G:
+ speed = RTE_ETH_SPEED_NUM_1G;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_2_5G:
- speed = ETH_SPEED_NUM_2_5G;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_2_5G:
+ speed = RTE_ETH_SPEED_NUM_2_5G;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
- case ETH_LINK_SPEED_10G:
- speed = ETH_SPEED_NUM_10G;
- duplex = ETH_LINK_FULL_DUPLEX;
+ case RTE_ETH_LINK_SPEED_10G:
+ speed = RTE_ETH_SPEED_NUM_10G;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
default:
- speed = ETH_SPEED_NUM_NONE;
- duplex = ETH_LINK_FULL_DUPLEX;
+ speed = RTE_ETH_SPEED_NUM_NONE;
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
}
/* Set link speed */
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
if (fif->mac_type == fman_mac_1g) {
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
- | ETH_LINK_SPEED_10M
- | ETH_LINK_SPEED_100M_HD
- | ETH_LINK_SPEED_100M
- | ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+ | RTE_ETH_LINK_SPEED_10M
+ | RTE_ETH_LINK_SPEED_100M_HD
+ | RTE_ETH_LINK_SPEED_100M
+ | RTE_ETH_LINK_SPEED_1G;
} else if (fif->mac_type == fman_mac_2_5g) {
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
- | ETH_LINK_SPEED_10M
- | ETH_LINK_SPEED_100M_HD
- | ETH_LINK_SPEED_100M
- | ETH_LINK_SPEED_1G
- | ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+ | RTE_ETH_LINK_SPEED_10M
+ | RTE_ETH_LINK_SPEED_100M_HD
+ | RTE_ETH_LINK_SPEED_100M
+ | RTE_ETH_LINK_SPEED_1G
+ | RTE_ETH_LINK_SPEED_2_5G;
} else if (fif->mac_type == fman_mac_10g) {
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
- | ETH_LINK_SPEED_10M
- | ETH_LINK_SPEED_100M_HD
- | ETH_LINK_SPEED_100M
- | ETH_LINK_SPEED_1G
- | ETH_LINK_SPEED_2_5G
- | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+ | RTE_ETH_LINK_SPEED_10M
+ | RTE_ETH_LINK_SPEED_100M_HD
+ | RTE_ETH_LINK_SPEED_100M
+ | RTE_ETH_LINK_SPEED_1G
+ | RTE_ETH_LINK_SPEED_2_5G
+ | RTE_ETH_LINK_SPEED_10G;
} else {
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, fif->mac_type);
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
- {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
- {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
- {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
- {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+ {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+ {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
};
/* Update Rx offload info */
uint64_t flags;
const char *output;
} tx_offload_map[] = {
- {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
- {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
- {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
- {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
- {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
- {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
- {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+ {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+ {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+ {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
};
/* Update Tx offload info */
ret = dpaa_get_link_status(__fif->node_name, link);
if (ret)
return ret;
- if (link->link_status == ETH_LINK_DOWN &&
+ if (link->link_status == RTE_ETH_LINK_DOWN &&
wait_to_complete)
rte_delay_ms(CHECK_INTERVAL);
else
}
if (ioctl_version < 2) {
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
- link->link_autoneg = ETH_LINK_AUTONEG;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = RTE_ETH_LINK_AUTONEG;
if (fif->mac_type == fman_mac_1g)
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
else if (fif->mac_type == fman_mac_2_5g)
- link->link_speed = ETH_SPEED_NUM_2_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
else if (fif->mac_type == fman_mac_10g)
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
else
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, fif->mac_type);
if (max_rx_pktlen <= buffsz) {
;
} else if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER) {
+ RTE_ETH_RX_OFFLOAD_SCATTER) {
if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
"MaxSGlist %d",
__fif = container_of(fif, struct __fman_if, __if);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+ dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
else
return dpaa_eth_dev_stop(dev);
return 0;
__fif = container_of(fif, struct __fman_if, __if);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+ dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
else
dpaa_eth_dev_start(dev);
return 0;
return -EINVAL;
}
- if (fc_conf->mode == RTE_FC_NONE) {
+ if (fc_conf->mode == RTE_ETH_FC_NONE) {
return 0;
- } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
- fc_conf->mode == RTE_FC_FULL) {
+ } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+ fc_conf->mode == RTE_ETH_FC_FULL) {
fman_if_set_fc_threshold(dev->process_private,
fc_conf->high_water,
fc_conf->low_water,
}
ret = fman_if_get_fc_threshold(dev->process_private);
if (ret) {
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
fc_conf->pause_time =
fman_if_get_fc_quanta(dev->process_private);
} else {
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
fc_conf = dpaa_intf->fc_conf;
ret = fman_if_get_fc_threshold(fman_intf);
if (ret) {
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
} else {
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
#define DPAA_DEBUG_FQ_TX_ERROR 1
#define DPAA_RSS_OFFLOAD_ALL ( \
- ETH_RSS_L2_PAYLOAD | \
- ETH_RSS_IP | \
- ETH_RSS_UDP | \
- ETH_RSS_TCP | \
- ETH_RSS_SCTP)
+ RTE_ETH_RSS_L2_PAYLOAD | \
+ RTE_ETH_RSS_IP | \
+ RTE_ETH_RSS_UDP | \
+ RTE_ETH_RSS_TCP | \
+ RTE_ETH_RSS_SCTP)
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
if (req_dist_set % 2 != 0) {
dist_field = 1U << loop;
switch (dist_field) {
- case ETH_RSS_L2_PAYLOAD:
+ case RTE_ETH_RSS_L2_PAYLOAD:
if (l2_configured)
break;
= HEADER_TYPE_ETH;
break;
- case ETH_RSS_IPV4:
- case ETH_RSS_FRAG_IPV4:
- case ETH_RSS_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_RSS_IPV4:
+ case RTE_ETH_RSS_FRAG_IPV4:
+ case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
if (ipv4_configured)
break;
= HEADER_TYPE_IPV4;
break;
- case ETH_RSS_IPV6:
- case ETH_RSS_FRAG_IPV6:
- case ETH_RSS_NONFRAG_IPV6_OTHER:
- case ETH_RSS_IPV6_EX:
+ case RTE_ETH_RSS_IPV6:
+ case RTE_ETH_RSS_FRAG_IPV6:
+ case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_RSS_IPV6_EX:
if (ipv6_configured)
break;
= HEADER_TYPE_IPV6;
break;
- case ETH_RSS_NONFRAG_IPV4_TCP:
- case ETH_RSS_NONFRAG_IPV6_TCP:
- case ETH_RSS_IPV6_TCP_EX:
+ case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+ case RTE_ETH_RSS_IPV6_TCP_EX:
if (tcp_configured)
break;
= HEADER_TYPE_TCP;
break;
- case ETH_RSS_NONFRAG_IPV4_UDP:
- case ETH_RSS_NONFRAG_IPV6_UDP:
- case ETH_RSS_IPV6_UDP_EX:
+ case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+ case RTE_ETH_RSS_IPV6_UDP_EX:
if (udp_configured)
break;
= HEADER_TYPE_UDP;
break;
- case ETH_RSS_NONFRAG_IPV4_SCTP:
- case ETH_RSS_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
if (sctp_configured)
break;
if (req_dist_set % 2 != 0) {
dist_field = 1ULL << loop;
switch (dist_field) {
- case ETH_RSS_L2_PAYLOAD:
- case ETH_RSS_ETH:
-
+ case RTE_ETH_RSS_L2_PAYLOAD:
+ case RTE_ETH_RSS_ETH:
if (l2_configured)
break;
l2_configured = 1;
i++;
break;
- case ETH_RSS_PPPOE:
+ case RTE_ETH_RSS_PPPOE:
if (pppoe_configured)
break;
kg_cfg->extracts[i].extract.from_hdr.prot =
i++;
break;
- case ETH_RSS_ESP:
+ case RTE_ETH_RSS_ESP:
if (esp_configured)
break;
esp_configured = 1;
i++;
break;
- case ETH_RSS_AH:
+ case RTE_ETH_RSS_AH:
if (ah_configured)
break;
ah_configured = 1;
i++;
break;
- case ETH_RSS_C_VLAN:
- case ETH_RSS_S_VLAN:
+ case RTE_ETH_RSS_C_VLAN:
+ case RTE_ETH_RSS_S_VLAN:
if (vlan_configured)
break;
vlan_configured = 1;
i++;
break;
- case ETH_RSS_MPLS:
+ case RTE_ETH_RSS_MPLS:
if (mpls_configured)
break;
i++;
break;
- case ETH_RSS_IPV4:
- case ETH_RSS_FRAG_IPV4:
- case ETH_RSS_NONFRAG_IPV4_OTHER:
- case ETH_RSS_IPV6:
- case ETH_RSS_FRAG_IPV6:
- case ETH_RSS_NONFRAG_IPV6_OTHER:
- case ETH_RSS_IPV6_EX:
+ case RTE_ETH_RSS_IPV4:
+ case RTE_ETH_RSS_FRAG_IPV4:
+ case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_RSS_IPV6:
+ case RTE_ETH_RSS_FRAG_IPV6:
+ case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_RSS_IPV6_EX:
if (l3_configured)
break;
i++;
break;
- case ETH_RSS_NONFRAG_IPV4_TCP:
- case ETH_RSS_NONFRAG_IPV6_TCP:
- case ETH_RSS_NONFRAG_IPV4_UDP:
- case ETH_RSS_NONFRAG_IPV6_UDP:
- case ETH_RSS_IPV6_TCP_EX:
- case ETH_RSS_IPV6_UDP_EX:
+ case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
+ case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
+ case RTE_ETH_RSS_IPV6_TCP_EX:
+ case RTE_ETH_RSS_IPV6_UDP_EX:
if (l4_configured)
break;
i++;
break;
- case ETH_RSS_NONFRAG_IPV4_SCTP:
- case ETH_RSS_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
if (sctp_configured)
break;
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_SCTP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_TIMESTAMP;
+ RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_SCATTER;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MT_LOCKFREE |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
PMD_INIT_FUNC_TRACE();
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
}
if (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, true);
else
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
dev_tx_offloads_nodis;
- dev_info->speed_capa = ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_10G;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
if (dpaa2_svr_family == SVR_LX2160A) {
- dev_info->speed_capa |= ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
}
return 0;
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
- {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
- {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
- {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
- {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
- {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
- {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
- {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+ {RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+ {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+ {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+ {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+ {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+ {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
};
/* Update Rx offload info */
uint64_t flags;
const char *output;
} tx_offload_map[] = {
- {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
- {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
- {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
- {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
- {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
- {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
- {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
- {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
- {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+ {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+ {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+ {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+ {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+ {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+ {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+ {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+ {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
};
/* Update Tx offload info */
return -1;
}
- if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
ret = dpaa2_setup_flow_dist(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf,
}
}
- if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rx_l3_csum_offload = true;
- if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
- (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
- (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
rx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
}
#if !defined(RTE_LIBRTE_IEEE1588)
- if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
{
ret = rte_mbuf_dyn_rx_timestamp_register(
dpaa2_enable_ts[dev->data->port_id] = true;
}
- if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
tx_l3_csum_offload = true;
- if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
- (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
- (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+ (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
tx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
}
}
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
dpaa2_tm_init(dev);
DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
return -1;
}
- if (state.up == ETH_LINK_DOWN &&
+ if (state.up == RTE_ETH_LINK_DOWN &&
wait_to_complete)
rte_delay_ms(CHECK_INTERVAL);
else
link.link_speed = state.rate;
if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
else
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == -1)
* No TX side flow control (send Pause frame disabled)
*/
if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
} else {
/* DPNI_LINK_OPT_PAUSE not set
* if ASYM_PAUSE set,
* Flow control disabled
*/
if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return ret;
/* update cfg with fc_conf */
switch (fc_conf->mode) {
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
/* Full flow control;
* OPT_PAUSE set, ASYM_PAUSE not set
*/
cfg.options |= DPNI_LINK_OPT_PAUSE;
cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
/* Enable RX flow control
* OPT_PAUSE not set;
* ASYM_PAUSE set;
cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
cfg.options &= ~DPNI_LINK_OPT_PAUSE;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
/* Enable TX Flow control
* OPT_PAUSE set
* ASYM_PAUSE set
cfg.options |= DPNI_LINK_OPT_PAUSE;
cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
break;
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
/* Disable Flow control
* OPT_PAUSE not set
* ASYM_PAUSE not set
#define DPAA2_TX_CONF_ENABLE 0x08
#define DPAA2_RSS_OFFLOAD_ALL ( \
- ETH_RSS_L2_PAYLOAD | \
- ETH_RSS_IP | \
- ETH_RSS_UDP | \
- ETH_RSS_TCP | \
- ETH_RSS_SCTP | \
- ETH_RSS_MPLS | \
- ETH_RSS_C_VLAN | \
- ETH_RSS_S_VLAN | \
- ETH_RSS_ESP | \
- ETH_RSS_AH | \
- ETH_RSS_PPPOE)
+ RTE_ETH_RSS_L2_PAYLOAD | \
+ RTE_ETH_RSS_IP | \
+ RTE_ETH_RSS_UDP | \
+ RTE_ETH_RSS_TCP | \
+ RTE_ETH_RSS_SCTP | \
+ RTE_ETH_RSS_MPLS | \
+ RTE_ETH_RSS_C_VLAN | \
+ RTE_ETH_RSS_S_VLAN | \
+ RTE_ETH_RSS_ESP | \
+ RTE_ETH_RSS_AH | \
+ RTE_ETH_RSS_PPPOE)
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060
#endif
if (eth_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
rte_vlan_strip(bufs[num_rx]);
dq_storage++;
eth_data->port_id);
if (eth_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP) {
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
rte_vlan_strip(bufs[num_rx]);
}
if (unlikely(((*bufs)->ol_flags
& PKT_TX_VLAN_PKT) ||
(eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
(eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
#define E1000_FTQF_QUEUE_ENABLE 0x00000100
#define IGB_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
/*
* The overhead from MTU to max frame size.
e1000_clear_hw_cntrs_base_generic(hw);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = eth_em_vlan_offload_set(dev, mask);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to update vlan offload");
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
hw->mac.autoneg = 1;
} else {
num_speeds = 0;
- autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+ autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
/* Reset */
hw->phy.autoneg_advertised = 0;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
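On the application side, the equivalent fixed-speed request goes through `link_speeds` at configure time; a minimal sketch (error handling omitted), assuming a single Rx/Tx queue on `port_id`:

#include <rte_ethdev.h>

static int force_100m_full(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		/* Disable autoneg and pin the link to 100 Mb/s full duplex. */
		.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_100M,
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}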
.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
};
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G;
/* Preferred queue parameters */
dev_info->default_rxportconf.nb_queues = 1;
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
} else {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if(mask & ETH_VLAN_STRIP_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
em_vlan_hw_strip_enable(dev);
else
em_vlan_hw_strip_disable(dev);
}
- if(mask & ETH_VLAN_FILTER_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
em_vlan_hw_filter_enable(dev);
else
em_vlan_hw_filter_disable(dev);
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id, link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
rx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
- uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Offloads of RTE_ETH_RX_OFFLOAD_* */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
- uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
const struct rte_memzone *mz;
};
RTE_SET_USED(dev);
tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
return tx_offload_capa;
}
uint64_t rx_offload_capa;
rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER;
return rx_offload_capa;
}
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
}
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
}
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
else
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
- if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
- tx_mq_mode == ETH_MQ_TX_DCB ||
- tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
+ tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
+ tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
PMD_INIT_LOG(ERR, "DCB mode is not supported.");
return -EINVAL;
}
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* Check multi-queue mode.
- * To no break software we accept ETH_MQ_RX_NONE as this might
+ * To not break software we accept RTE_ETH_MQ_RX_NONE as this might
* be used to turn off VLAN filter.
*/
- if (rx_mq_mode == ETH_MQ_RX_NONE ||
- rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
+ rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
} else {
/* Only support one queue on VFs.
return -EINVAL;
}
/* TX mode is not used here, so mode might be ignored.*/
- if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(WARNING, "SRIOV is active,"
" TX mode %d is not supported. "
" Driver will behave as %d mode.",
- tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+ tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
}
/* check valid queue number */
/* To no break software that set invalid mode, only display
* warning if invalid mode is used.
*/
- if (rx_mq_mode != ETH_MQ_RX_NONE &&
- rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
- rx_mq_mode != ETH_MQ_RX_RSS) {
+ if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
+ rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
/* RSS together with VMDq not supported*/
PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
rx_mq_mode);
return -EINVAL;
}
- if (tx_mq_mode != ETH_MQ_TX_NONE &&
- tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
+ tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
" Due to txmode is meaningless in this"
" driver, just ignore.",
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multipe queue mode checking */
ret = igb_check_mq_mode(dev);
/*
* VLAN Offload Settings
*/
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = eth_igb_vlan_offload_set(dev, mask);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set vlan offload");
return ret;
}
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
/* Enable VLAN filter since VMDq always use VLAN filter */
igb_vmdq_vlan_hw_filter_enable(dev);
}
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
hw->mac.autoneg = 1;
} else {
num_speeds = 0;
- autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+ autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
/* Reset */
hw->phy.autoneg_advertised = 0;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
case e1000_82576:
dev_info->max_rx_queues = 16;
dev_info->max_tx_queues = 16;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
dev_info->vmdq_queue_num = 16;
break;
case e1000_82580:
dev_info->max_rx_queues = 8;
dev_info->max_tx_queues = 8;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
dev_info->vmdq_queue_num = 8;
break;
case e1000_i350:
dev_info->max_rx_queues = 8;
dev_info->max_tx_queues = 8;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
dev_info->vmdq_queue_num = 8;
break;
return -EINVAL;
}
dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G;
dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
switch (hw->mac.type) {
case e1000_vfadapt:
dev_info->max_rx_queues = 2;
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
} else if (!link_check) {
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
qinq &= E1000_CTRL_EXT_EXT_VLAN;
/* only outer TPID of double VLAN can be configured*/
- if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
reg = E1000_READ_REG(hw, E1000_VET);
reg = (reg & (~E1000_VET_VET_EXT)) |
((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if(mask & ETH_VLAN_STRIP_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
- if(mask & ETH_VLAN_FILTER_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
- if(mask & ETH_VLAN_EXTEND_MASK){
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
" Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id,
(unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
rx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
* on configuration
*/
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
ctrl |= E1000_CTRL_RFCE;
ctrl &= ~E1000_CTRL_TFCE;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
ctrl |= E1000_CTRL_TFCE;
ctrl &= ~E1000_CTRL_RFCE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
break;
default:
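The hunks above only rename the flow-control constants; the ethdev flow-control API itself is unchanged. A minimal, illustrative application-side sketch (not part of this patch) using the new names:

/* Illustrative only: enable full flow control on a port with the
 * renamed RTE_ETH_FC_* constants; rte_eth_dev_flow_ctrl_get()/set()
 * are the existing ethdev calls.
 */
#include <rte_ethdev.h>

static int
enable_full_flow_control(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL; /* was RTE_FC_FULL */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}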
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#endif
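Applications consuming the renamed RX offload flags keep the usual capability-check pattern; a hedged sketch (not from this patch, helper name assumed) follows:

/* Illustrative only: request KEEP_CRC only when the port reports it. */
#include <rte_ethdev.h>

static void
request_keep_crc_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}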
uint16_t idx, shift;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGB_4_BIT_MASK);
if (!mask)
uint16_t idx, shift;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGB_4_BIT_MASK);
if (!mask)
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
- uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
const struct rte_memzone *mz;
};
/**< Start context position for transmit queue. */
struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
/**< Hardware context history.*/
- uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
const struct rte_memzone *mz;
};
uint64_t tx_offload_capa;
RTE_SET_USED(dev);
- tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return tx_offload_capa;
}
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (hw->mac.type == e1000_i350 ||
hw->mac.type == e1000_i210 ||
hw->mac.type == e1000_i211)
- rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
return rx_offload_capa;
}
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
/* Set configured hashing protocols in MRQC register */
rss_hf = rss_conf->rss_hf;
mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
- if (rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
- if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
}
rss_hf = 0;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
- rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
return 0;
}
E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
E1000_VMOLR_MPME);
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
vmolr |= E1000_VMOLR_AUPE;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
vmolr |= E1000_VMOLR_ROMPE;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
vmolr |= E1000_VMOLR_ROPE;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
vmolr |= E1000_VMOLR_BAM;
- if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
vmolr |= E1000_VMOLR_MPME;
E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
/* VLVF: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
- (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
- ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+ E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+ (cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
+ ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
E1000_VLVF_POOLSEL_MASK)));
}
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t mrqc;
- if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+ if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
/*
* SRIOV active scheme
* FIXME if support RSS together with VMDq & SRIOV
* SRIOV inactive scheme
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
igb_rss_configure(dev);
break;
- case ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
/*Configure general VMDQ only RX parameters*/
igb_vmdq_rx_hw_configure(dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/* if mq_mode is none, disable rss mode.*/
default:
igb_rss_disable(dev);
* Set maximum packet length by default, and might be updated
* together with enabling/disabling dual VLAN.
*/
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
max_len += VLAN_TAG_SIZE;
E1000_WRITE_REG(hw, E1000_RLPML, max_len);
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
rxcsum |= E1000_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
- if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
if (rxmode->offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
rxcsum |= E1000_RXCSUM_TUOFL;
else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_CRCOFL;
else
rxcsum &= ~E1000_RXCSUM_CRCOFL;
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
/* clear STRCRC bit in all queues */
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Make sure VLAN Filters are off. */
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
rctl &= ~E1000_RCTL_VFE;
/* Don't store bad packets. */
rctl &= ~E1000_RCTL_SBP;
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
- DEV_TX_OFFLOAD_UDP_CKSUM |\
- DEV_TX_OFFLOAD_IPV4_CKSUM |\
- DEV_TX_OFFLOAD_TCP_TSO)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
PKT_TX_IP_CKSUM |\
PKT_TX_TCP_SEG)
(queue_offloads & QUEUE_OFFLOADS)) {
/* check if TSO is required */
if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
- (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
ena_tx_ctx->tso_enable = true;
ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
/* check if L3 checksum is needed */
if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
ena_tx_ctx->l3_csum_enable = true;
if (mbuf->ol_flags & PKT_TX_IPV6) {
/* check if L4 checksum is needed */
if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
PKT_TX_UDP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
} else {
struct rte_eth_link *link = &dev->data->dev_link;
struct ena_adapter *adapter = dev->data->dev_private;
- link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
- link->link_speed = ETH_SPEED_NUM_NONE;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return 0;
}
if (rc)
goto err_start_tx;
- if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
rc = ena_rss_configure(adapter);
if (rc)
goto err_rss_init;
adapter->state = ENA_ADAPTER_STATE_CONFIG;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
- dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* Scattered Rx cannot be turned off in the HW, so this capability must
* be forced.
uint64_t port_offloads = 0;
if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
- port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
if (adapter->offloads.rx_offloads &
(ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
port_offloads |=
- DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
- port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
return port_offloads;
}
uint64_t port_offloads = 0;
if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
- port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
- port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
if (adapter->offloads.tx_offloads &
(ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
port_offloads |=
- DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
- port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return port_offloads;
}
ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
dev_info->speed_capa =
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_5G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_5G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
/* Inform framework about available features */
dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
}
#endif
- fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+ fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
descs_in_use = rx_ring->ring_size -
ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Check if requested offload is also enabled for the queue */
if ((ol_flags & PKT_TX_IP_CKSUM &&
- !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+ !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
(l4_csum_flag == PKT_TX_TCP_CKSUM &&
- !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+ !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
(l4_csum_flag == PKT_TX_UDP_CKSUM &&
- !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+ !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
PMD_TX_LOG(DEBUG,
"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
i, m->nb_segs, tx_ring->id);
#define ENA_HASH_KEY_SIZE 40
-#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)
+#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
if (reta_size == 0 || reta_conf == NULL)
return -EINVAL;
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
PMD_DRV_LOG(ERR,
"RSS was not configured for the PMD\n");
return -ENOTSUP;
/* Each reta_conf is for 64 entries.
* To support 128 we use 2 conf of 64.
*/
- conf_idx = i / RTE_RETA_GROUP_SIZE;
- idx = i % RTE_RETA_GROUP_SIZE;
+ conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ idx = i % RTE_ETH_RETA_GROUP_SIZE;
if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
entry_value =
ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
if (reta_size == 0 || reta_conf == NULL)
return -EINVAL;
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
PMD_DRV_LOG(ERR,
"RSS was not configured for the PMD\n");
return -ENOTSUP;
}
for (i = 0 ; i < reta_size ; i++) {
- reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
- reta_idx = i % RTE_RETA_GROUP_SIZE;
+ reta_conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
reta_conf[reta_conf_idx].reta[reta_idx] =
ENA_IO_RXQ_IDX_REV(indirect_table[i]);
/* Convert proto to ETH flag */
switch (proto) {
case ENA_ADMIN_RSS_TCP4:
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
break;
case ENA_ADMIN_RSS_UDP4:
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
break;
case ENA_ADMIN_RSS_TCP6:
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
break;
case ENA_ADMIN_RSS_UDP6:
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
break;
case ENA_ADMIN_RSS_IP4:
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
break;
case ENA_ADMIN_RSS_IP6:
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
break;
case ENA_ADMIN_RSS_IP4_FRAG:
- rss_hf |= ETH_RSS_FRAG_IPV4;
+ rss_hf |= RTE_ETH_RSS_FRAG_IPV4;
break;
case ENA_ADMIN_RSS_NOT_IP:
- rss_hf |= ETH_RSS_L2_PAYLOAD;
+ rss_hf |= RTE_ETH_RSS_L2_PAYLOAD;
break;
case ENA_ADMIN_RSS_TCP6_EX:
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
break;
case ENA_ADMIN_RSS_IP6_EX:
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
break;
default:
break;
/* Check if only DA or SA is being used for L3. */
switch (fields & ENA_HF_RSS_ALL_L3) {
case ENA_ADMIN_RSS_L3_SA:
- rss_hf |= ETH_RSS_L3_SRC_ONLY;
+ rss_hf |= RTE_ETH_RSS_L3_SRC_ONLY;
break;
case ENA_ADMIN_RSS_L3_DA:
- rss_hf |= ETH_RSS_L3_DST_ONLY;
+ rss_hf |= RTE_ETH_RSS_L3_DST_ONLY;
break;
default:
break;
/* Check if only DA or SA is being used for L4. */
switch (fields & ENA_HF_RSS_ALL_L4) {
case ENA_ADMIN_RSS_L4_SP:
- rss_hf |= ETH_RSS_L4_SRC_ONLY;
+ rss_hf |= RTE_ETH_RSS_L4_SRC_ONLY;
break;
case ENA_ADMIN_RSS_L4_DP:
- rss_hf |= ETH_RSS_L4_DST_ONLY;
+ rss_hf |= RTE_ETH_RSS_L4_DST_ONLY;
break;
default:
break;
fields_mask = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
/* Determine which fields of L3 should be used. */
- switch (rss_hf & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) {
- case ETH_RSS_L3_DST_ONLY:
+ switch (rss_hf & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) {
+ case RTE_ETH_RSS_L3_DST_ONLY:
fields_mask |= ENA_ADMIN_RSS_L3_DA;
break;
- case ETH_RSS_L3_SRC_ONLY:
+ case RTE_ETH_RSS_L3_SRC_ONLY:
fields_mask |= ENA_ADMIN_RSS_L3_SA;
break;
default:
}
/* Determine which fields of L4 should be used. */
- switch (rss_hf & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) {
- case ETH_RSS_L4_DST_ONLY:
+ switch (rss_hf & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) {
+ case RTE_ETH_RSS_L4_DST_ONLY:
fields_mask |= ENA_ADMIN_RSS_L4_DP;
break;
- case ETH_RSS_L4_SRC_ONLY:
+ case RTE_ETH_RSS_L4_SRC_ONLY:
fields_mask |= ENA_ADMIN_RSS_L4_SP;
break;
default:
int rc, i;
/* Turn on appropriate fields for each requested packet type */
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
selected_fields[ENA_ADMIN_RSS_TCP4].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP4, rss_hf);
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
selected_fields[ENA_ADMIN_RSS_UDP4].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP4, rss_hf);
- if ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
selected_fields[ENA_ADMIN_RSS_TCP6].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6, rss_hf);
- if ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != 0)
+ if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
selected_fields[ENA_ADMIN_RSS_UDP6].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_UDP6, rss_hf);
- if ((rss_hf & ETH_RSS_IPV4) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
selected_fields[ENA_ADMIN_RSS_IP4].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4, rss_hf);
- if ((rss_hf & ETH_RSS_IPV6) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
selected_fields[ENA_ADMIN_RSS_IP6].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6, rss_hf);
- if ((rss_hf & ETH_RSS_FRAG_IPV4) != 0)
+ if ((rss_hf & RTE_ETH_RSS_FRAG_IPV4) != 0)
selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP4_FRAG, rss_hf);
- if ((rss_hf & ETH_RSS_L2_PAYLOAD) != 0)
+ if ((rss_hf & RTE_ETH_RSS_L2_PAYLOAD) != 0)
selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_NOT_IP, rss_hf);
- if ((rss_hf & ETH_RSS_IPV6_TCP_EX) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) != 0)
selected_fields[ENA_ADMIN_RSS_TCP6_EX].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_TCP6_EX, rss_hf);
- if ((rss_hf & ETH_RSS_IPV6_EX) != 0)
+ if ((rss_hf & RTE_ETH_RSS_IPV6_EX) != 0)
selected_fields[ENA_ADMIN_RSS_IP6_EX].fields =
ena_eth_hf_to_admin_hf(ENA_ADMIN_RSS_IP6_EX, rss_hf);
uint16_t admin_hf;
static bool warn_once;
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
return -ENOTSUP;
}
status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
if (status & ENETC_LINK_MODE)
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
else
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
if (status & ENETC_LINK_STATUS)
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
else
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
switch (status & ENETC_LINK_SPEED_MASK) {
case ENETC_LINK_SPEED_1G:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case ENETC_LINK_SPEED_100M:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
default:
case ENETC_LINK_SPEED_10M:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
}
return rte_eth_linkstatus_set(dev, &link);
dev_info->max_tx_queues = MAX_TX_RINGS;
dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
dev_info->rx_offload_capa =
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC);
+ (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC);
return 0;
}
RTE_ETH_QUEUE_STATE_STOPPED;
}
- rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+ rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
RTE_ETHER_CRC_LEN : 0);
return 0;
enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
int config;
config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
}
- if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
checksum &= ~L3_CKSUM;
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
checksum &= ~L4_CKSUM;
enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
*/
uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
uint8_t rss_enable;
- uint64_t rss_hf; /* ETH_RSS flags */
+ uint64_t rss_hf; /* RTE_ETH_RSS flags */
union vnic_rss_key rss_key;
union vnic_rss_cpu rss_cpu;
uint16_t sub_devid;
uint32_t capa;
} vic_speed_capa_map[] = {
- { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
- { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
- { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
- { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
- { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
- { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
- { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
- { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
- { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
- { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
- { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
- { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
- { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
- { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
- { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G }, /* 1440 Mezz */
- { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G }, /* 1480 MLOM */
- { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
- { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
- { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
- { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
- { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
- { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+ { 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+ { 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+ { 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+ { 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+ { 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+ { 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+ { 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+ { 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+ { 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+ { 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+ { 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+ { 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+ { 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+ { 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+ { 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+ { 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+ { 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+ { 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+ { 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+ { 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+ { 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+ { 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
{ 0, 0 }, /* End marker */
};
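For context, the table above maps VIC subsystem device IDs to RTE_ETH_LINK_SPEED_* capability masks; a hypothetical lookup helper (the function name and zero fallback are assumptions, not part of the patch) would scan it like this:

/* Illustrative only: resolve a speed capability from the map above.
 * Returns 0 when the subsystem ID is not listed.
 */
static uint32_t
vic_speed_capa_lookup(uint16_t sub_devid)
{
	unsigned int i;

	for (i = 0; vic_speed_capa_map[i].sub_devid != 0; i++) {
		if (vic_speed_capa_map[i].sub_devid == sub_devid)
			return vic_speed_capa_map[i].capa;
	}
	return 0;
}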
ENICPMD_FUNC_TRACE();
offloads = eth_dev->data->dev_conf.rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
return ret;
}
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
enic->mc_count = 0;
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CHECKSUM);
+ RTE_ETH_RX_OFFLOAD_CHECKSUM);
/* All vlan offload masks to apply the current settings */
- mask = ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = enicpmd_vlan_offload_set(eth_dev, mask);
if (ret) {
dev_err(enic, "Failed to configure VLAN offloads\n");
}
/* 1300 and later models are at least 40G */
if (id >= 0x0100)
- return ETH_LINK_SPEED_40G;
+ return RTE_ETH_LINK_SPEED_40G;
/* VFs have subsystem id 0, check device id */
if (id == 0) {
/* Newer VF implies at least 40G model */
if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
- return ETH_LINK_SPEED_40G;
+ return RTE_ETH_LINK_SPEED_40G;
}
- return ETH_LINK_SPEED_10G;
+ return RTE_ETH_LINK_SPEED_10G;
}
static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
enic->rss_cpu.cpu[i / 4].b[i % 4]);
*/
rss_cpu = enic->rss_cpu;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
rss_cpu.cpu[i / 4].b[i % 4] =
enic_rte_rq_idx_to_sop_idx(
*/
conf->offloads = enic->rx_offload_capa;
if (!enic->ig_vlan_strip_en)
- conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* rx_thresh and other fields are not applicable for enic */
}
static int udp_tunnel_common_check(struct enic *enic,
struct rte_eth_udp_tunnel *tnl)
{
- if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
- tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
+ if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+ tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
return -ENOTSUP;
if (!enic->overlay_offload) {
ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
ret = udp_tunnel_common_check(enic, tnl);
if (ret)
return ret;
- vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+ vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
if (vxlan)
port = enic->vxlan_port;
else
ret = udp_tunnel_common_check(enic, tnl);
if (ret)
return ret;
- vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
+ vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
if (vxlan)
port = enic->vxlan_port;
else
memset(&link, 0, sizeof(link));
link.link_status = enic_get_link_status(enic);
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_speed = vnic_dev_port_speed(enic->vdev);
return rte_eth_linkstatus_set(eth_dev, &link);
}
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
/* vnic notification of link status has already been turned on in
* enic_dev_init() which is called during probe time. Here we are
* and vlan insertion are supported.
*/
simple_tx_offloads = enic->tx_offload_capa &
- (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
if ((eth_dev->data->dev_conf.txmode.offloads &
~simple_tx_offloads) == 0) {
ENICPMD_LOG(DEBUG, " use the simple tx handler");
max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER) {
+ RTE_ETH_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((max pkt len)/mbuf_size) */
mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
rss_hash_type = 0;
rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
if (enic->rq_count > 1 &&
- (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
rss_hf != 0) {
rss_enable = 1;
- if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER))
+ if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
if (enic->udp_rss_weak) {
/*
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
}
}
- if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
- ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
- if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
- if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
if (enic->udp_rss_weak)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
return -EINVAL;
}
enic->tx_offload_capa |=
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- (enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
- (enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ (enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+ (enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
enic->tx_offload_mask |=
PKT_TX_OUTER_IPV6 |
PKT_TX_OUTER_IPV4 |
* IPV4 hash type handles both non-frag and frag packet types.
* TCP/UDP is controlled via a separate flag below.
*/
- enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (ENIC_SETTING(enic, RSSHASH_IPV6))
/*
* The VIC adapter can perform RSS on IPv6 packets with and
* without extension headers. An IPv6 "fragment" is an IPv6
* packet with the fragment extension header.
*/
- enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
- ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX;
if (enic->udp_rss_weak)
enic->flow_type_rss_offloads |=
- ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
- enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ enic->flow_type_rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
/* Zero offloads if RSS is not enabled */
if (!ENIC_SETTING(enic, RSS))
enic->tx_queue_offload_capa = 0;
enic->tx_offload_capa =
enic->tx_queue_offload_capa |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
enic->rx_offload_capa =
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
enic->tx_offload_mask =
PKT_TX_IPV6 |
PKT_TX_IPV4 |
const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
static const struct rte_eth_link eth_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_UP,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG,
};
static int
int qid;
struct rte_eth_dev *fsdev;
struct rxq **rxq;
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&ETH(sdev)->data->dev_conf.intr_conf;
fsdev = fs_dev(sdev);
failsafe_rx_intr_install(struct rte_eth_dev *dev)
{
struct fs_priv *priv = PRIV(dev);
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&priv->data->dev_conf.intr_conf;
if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
* configuring a sub-device.
*/
infos->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_MACSEC_STRIP |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+ RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_SECURITY |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
infos->rx_queue_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_MACSEC_STRIP |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
+ RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_SECURITY |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
infos->tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
infos->flow_type_rss_offloads =
- ETH_RSS_IP |
- ETH_RSS_UDP |
- ETH_RSS_TCP;
+ RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP;
infos->dev_capa =
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
uint8_t drop_en;
uint8_t rx_deferred_start; /* don't start this queue in dev start. */
uint16_t rx_ftag_en; /* indicates FTAG RX supported */
- uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /* offloads of RTE_ETH_RX_OFFLOAD_* */
};
/*
uint16_t next_rs; /* Next pos to set RS flag */
uint16_t next_dd; /* Next pos to check DD flag */
volatile uint32_t *tail_ptr;
- uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /* Offloads of RTE_ETH_TX_OFFLOAD_* */
uint16_t nb_desc;
uint16_t port_id;
uint8_t tx_deferred_start; /** don't start this queue in dev start. */
vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
- if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
PMD_INIT_LOG(ERR, "DCB mode is not supported.");
return -EINVAL;
}
- if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ if (!(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
return 0;
if (hw->mac.type == fm10k_mac_vf) {
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multipe queue mode checking */
ret = fm10k_check_mq_mode(dev);
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
- if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+ if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS ||
dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
return;
*/
hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
mrqc = 0;
- mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
if (mrqc == 0) {
	PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
if (hw->mac.type != fm10k_mac_pf)
return;
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
nb_queue_pools = vmdq_conf->nb_queue_pools;
/* no pool number change, no need to update logic port and VLAN/MAC */
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
- rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ rxq->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
}
/* Update default vlan when not in VMDQ mode */
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
fm10k_link_update(dev, 0);
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
- dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_50G;
+ dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status =
- dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
- dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+ dev_info->sm_down ? RTE_ETH_LINK_DOWN : RTE_ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
return 0;
}
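The same pattern of filling struct rte_eth_link with the renamed constants recurs in every driver touched by this change. A minimal, self-contained sketch of such a helper (the link_up/speed inputs are hypothetical placeholders for whatever the hardware reports):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Sketch: translate a driver's raw link state into the generic
 * rte_eth_link representation using the RTE_ETH_* names. */
static void
fill_eth_link(struct rte_eth_link *link, bool link_up, uint32_t speed_mbps)
{
	link->link_status  = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link->link_speed   = link_up ? speed_mbps : RTE_ETH_SPEED_NUM_NONE;
	link->link_duplex  = RTE_ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = RTE_ETH_LINK_FIXED; /* no autonegotiation assumed */
}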
dev_info->max_vfs = pdev->max_vfs;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
- dev_info->max_vmdq_pools = ETH_32_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
- dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
- ETH_RSS_IPV6 |
- ETH_RSS_IPV6_EX |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
};
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
return 0;
}
return -EINVAL;
}
- if (vlan_id > ETH_VLAN_ID_MAX) {
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
return -EINVAL;
}
{
RTE_SET_USED(dev);
- return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+ return (uint64_t)(RTE_ETH_RX_OFFLOAD_SCATTER);
}
static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
- return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_RSS_HASH);
+ return (uint64_t)(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
}
static int
{
RTE_SET_USED(dev);
- return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO);
+ return (uint64_t)(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO);
}
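On the application side the renamed capability bits are consumed exactly as before: query the port, then request only what it advertises. A rough sketch (error handling trimmed, offload choices arbitrary):

#include <rte_ethdev.h>

/* Illustrative only: enable an offload only when the PMD advertises it. */
static int
configure_offloads(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
	return 0;
}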
static int
* 128-entries in 32 registers
*/
for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
BIT_MASK_PER_UINT32);
if (mask == 0)
* 128-entries in 32 registers
*/
for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
BIT_MASK_PER_UINT32);
if (mask == 0)
return -EINVAL;
mrqc = 0;
- mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
- mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
- mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & RTE_ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* If the mapping doesn't fit any supported, return */
if (mrqc == 0)
mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
hf = 0;
- hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
- hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
- hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
- hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
- hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
- hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
- hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
- hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
- hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV4) ? RTE_ETH_RSS_IPV4 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? RTE_ETH_RSS_IPV6 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? RTE_ETH_RSS_IPV6_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? RTE_ETH_RSS_IPV6_UDP_EX : 0;
rss_conf->rss_hf = hf;
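Applications observe the result of this MRQC-to-rss_hf mapping through the generic RSS query API; a rough usage sketch with the renamed hash-type bits (the key buffer size is an assumption, large enough for typical ports):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: read back the active RSS hash types and report a couple of them. */
static void
print_rss_types(uint16_t port_id)
{
	uint8_t key[64]; /* assumed to cover this port's hash key size */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
	};

	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) != 0)
		return;
	if (rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
		printf("IPv4/TCP hashing enabled\n");
	if (rss_conf.rss_hf & RTE_ETH_RSS_IPV6_EX)
		printf("IPv6-with-extensions hashing enabled\n");
}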
/* first clear the internal SW recording structure */
if (!(dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_VMDQ_FLAG))
+ RTE_ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid,
false);
MAIN_VSI_POOL_NUMBER);
if (!(dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_VMDQ_FLAG))
+ RTE_ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid,
true);
{
#ifndef RTE_LIBRTE_IEEE1588
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
	/* without Rx ol_flags, no VP flag report */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
return -1;
/* no header split support */
- if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
static int hinic_link_event_process(struct hinic_hwdev *hwdev,
struct rte_eth_dev *eth_dev, u8 status)
{
- uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
- ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
- ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+ uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+ RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+ RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+ RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
struct nic_port_info port_info;
struct rte_eth_link link;
int rc = HINIC_OK;
if (!status) {
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
} else {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
memset(&port_info, 0, sizeof(port_info));
rc = hinic_get_port_info(hwdev, &port_info);
if (rc) {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
} else {
link.link_speed = port_speed[port_info.speed %
LINK_SPEED_MAX];
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* mtu size is 256~9600 */
if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
	/* init vlan offload */
err = hinic_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
(void)hinic_config_mq_mode(dev, FALSE);
} else {
*speed_capa = 0;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
- *speed_capa |= ETH_LINK_SPEED_1G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
- *speed_capa |= ETH_LINK_SPEED_10G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
- *speed_capa |= ETH_LINK_SPEED_25G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
- *speed_capa |= ETH_LINK_SPEED_40G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
- *speed_capa |= ETH_LINK_SPEED_100G;
+ *speed_capa |= RTE_ETH_LINK_SPEED_100G;
}
}
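A corresponding application-side check is to validate a requested link_speeds mask against the speed_capa bits assembled above. A minimal sketch, under the assumption that an empty intersection should simply be rejected:

#include <rte_ethdev.h>

/* Sketch: reject a fixed-speed request the port cannot satisfy. */
static int
check_speed_request(uint16_t port_id, uint32_t link_speeds)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -1;
	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		return 0; /* autonegotiation is always acceptable here */
	if (((link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) & dev_info.speed_capa) == 0)
		return -1; /* none of the requested speeds is supported */
	return 0;
}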
hinic_get_speed_capa(dev, &info->speed_capa);
info->rx_queue_offload_capa = 0;
- info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_RSS_HASH;
+ info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
info->tx_queue_offload_capa = 0;
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
info->hash_key_size = HINIC_RSS_KEY_SIZE;
info->reta_size = HINIC_RSS_INDIR_SIZE;
u8 port_link_status = 0;
struct nic_port_info port_link_info;
struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
- uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
- ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
- ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+ uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
+ RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
+ RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
+ RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
rc = hinic_get_link_status(nic_hwdev, &port_link_status);
if (rc)
return rc;
if (!port_link_status) {
- link->link_status = ETH_LINK_DOWN;
+ link->link_status = RTE_ETH_LINK_DOWN;
link->link_speed = 0;
- link->link_duplex = ETH_LINK_HALF_DUPLEX;
- link->link_autoneg = ETH_LINK_FIXED;
+ link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link->link_autoneg = RTE_ETH_LINK_FIXED;
return HINIC_OK;
}
/* Get link status information from hardware */
rc = hinic_priv_get_dev_link_status(nic_dev, &link);
if (rc != HINIC_OK) {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Get link status failed");
goto out;
}
int err;
/* Enable or disable VLAN filter */
- if (mask & ETH_VLAN_FILTER_MASK) {
- on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
TRUE : FALSE;
err = hinic_config_vlan_filter(nic_dev->hwdev, on);
if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
}
/* Enable or disable VLAN stripping */
- if (mask & ETH_VLAN_STRIP_MASK) {
- on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
TRUE : FALSE;
err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
if (err) {
fc_conf->autoneg = nic_pause.auto_neg;
if (nic_pause.tx_pause && nic_pause.rx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (nic_pause.tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else if (nic_pause.rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
nic_pause.auto_neg = fc_conf->autoneg;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_TX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
nic_pause.tx_pause = true;
else
nic_pause.tx_pause = false;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_RX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
nic_pause.rx_pause = true;
else
nic_pause.rx_pause = false;
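The pause-frame translation above is symmetric in every driver; a standalone sketch of both directions using only the renamed RTE_ETH_FC_* values:

#include <stdbool.h>
#include <rte_ethdev.h>

/* Sketch: map Rx/Tx pause ability to the generic flow-control mode. */
static enum rte_eth_fc_mode
pause_to_fc_mode(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return RTE_ETH_FC_FULL;
	if (tx_pause)
		return RTE_ETH_FC_TX_PAUSE;
	if (rx_pause)
		return RTE_ETH_FC_RX_PAUSE;
	return RTE_ETH_FC_NONE;
}

/* ...and back again. */
static void
fc_mode_to_pause(enum rte_eth_fc_mode mode, bool *rx_pause, bool *tx_pause)
{
	*tx_pause = (mode == RTE_ETH_FC_FULL || mode == RTE_ETH_FC_TX_PAUSE);
	*rx_pause = (mode == RTE_ETH_FC_FULL || mode == RTE_ETH_FC_RX_PAUSE);
}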
struct nic_rss_type rss_type = {0};
int err = 0;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
PMD_DRV_LOG(WARNING, "RSS is not enabled");
return HINIC_OK;
}
}
}
- rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
- rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
- rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
- rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
- rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
- rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
- rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
- rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+ rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+ rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+ rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+ rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+ rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+ rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+ rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+ rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
if (err) {
struct nic_rss_type rss_type = {0};
int err;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
PMD_DRV_LOG(WARNING, "RSS is not enabled");
return HINIC_ERROR;
}
rss_conf->rss_hf = 0;
rss_conf->rss_hf |= rss_type.ipv4 ?
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
- rss_conf->rss_hf |= rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0;
rss_conf->rss_hf |= rss_type.ipv6 ?
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
- rss_conf->rss_hf |= rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
- rss_conf->rss_hf |= rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
- rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
- rss_conf->rss_hf |= rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
- rss_conf->rss_hf |= rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0;
+ rss_conf->rss_hf |= rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0;
+ rss_conf->rss_hf |= rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ rss_conf->rss_hf |= rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0;
return HINIC_OK;
}
u16 i = 0;
u16 idx, shift;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG))
return HINIC_OK;
if (reta_size != NIC_RSS_INDIR_SIZE) {
/* update rss indir_tbl */
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
}
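Applications drive these redirection tables through rte_eth_dev_rss_reta_update(), where the renamed RTE_ETH_RETA_GROUP_SIZE determines how entries are split across the rte_eth_rss_reta_entry64 array. A short sketch, assuming reta_size is a non-zero multiple of RTE_ETH_RETA_GROUP_SIZE and nb_queues is non-zero:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: spread a redirection table of reta_size entries evenly over
 * nb_queues Rx queues and program it into the port. */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta[idx].mask |= 1ULL << shift;
		reta[idx].reta[shift] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}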
{
u64 rss_hf = rss_conf->rss_hf;
- rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
- rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
- rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
- rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
- rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
- rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
- rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
- rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+ rss_type->ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+ rss_type->tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+ rss_type->ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+ rss_type->ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0;
+ rss_type->tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+ rss_type->tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+ rss_type->udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+ rss_type->udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
}
static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
{
int err, i;
- if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
- nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+ if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) {
+ nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
nic_dev->num_rss = 0;
if (nic_dev->num_rq > 1) {
/* get rss template id */
PMD_DRV_LOG(WARNING, "Alloc rss template failed");
return err;
}
- nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+ nic_dev->flags |= RTE_ETH_MQ_RX_RSS_FLAG;
for (i = 0; i < nic_dev->num_rq; i++)
hinic_add_rq_to_rx_queue_list(nic_dev, i);
}
static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
{
- if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
if (hinic_rss_template_free(nic_dev->hwdev,
nic_dev->rss_tmpl_idx))
PMD_DRV_LOG(WARNING, "Free rss template failed");
- nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+ nic_dev->flags &= ~RTE_ETH_MQ_RX_RSS_FLAG;
}
}
int ret = 0;
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
ret = hinic_config_mq_rx_rss(nic_dev, on);
break;
default:
int lro_wqe_num;
int buf_size;
- if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
if (rss_conf.rss_hf == 0) {
rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
}
/* Enable both L3/L4 rx checksum offload */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
err = hinic_set_rx_csum_offload(nic_dev->hwdev,
goto rx_csum_ofl_err;
/* config lro */
- lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+ lro_en = dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ?
true : false;
max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
- if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG) {
hinic_rss_deinit(nic_dev);
hinic_destroy_num_qps(nic_dev);
}
#define HINIC_DEFAULT_RX_FREE_THRESH 32
#define HINIC_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 |\
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 |\
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
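Driver-private aggregates like the one above mirror what the port reports in flow_type_rss_offloads; an application can clamp its requested hash types the same way. An illustrative sketch:

#include <rte_ethdev.h>

/* Sketch: drop any requested RSS hash types the port does not support. */
static int
clamp_rss_types(uint16_t port_id, uint64_t *rss_hf)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -1;
	*rss_hf &= dev_info.flow_type_rss_offloads;
	return 0;
}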
enum rq_completion_fmt {
RQ_COMPLETE_SGE = 1
return ret;
}
- if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
if (dcb_rx_conf->nb_tcs == 0)
hw->dcb_info.pfc_en = 1; /* tc0 only */
uint16_t nb_tx_q = hw->data->nb_tx_queues;
int ret;
- if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
return 0;
ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
{
switch (mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
hw->requested_fc_mode = HNS3_FC_NONE;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
hw->requested_fc_mode = HNS3_FC_FULL;
break;
default:
hw->requested_fc_mode = HNS3_FC_NONE;
hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
- "configured to RTE_FC_NONE", mode);
+ "configured to RTE_ETH_FC_NONE", mode);
break;
}
}
};
static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
- { ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
- { ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
- { ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
- { ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
- { ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
- { ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};
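The same renamed speed constants key the FEC capability table; on the application side that table is retrieved with rte_eth_fec_get_capability(). A rough sketch, assuming a fixed upper bound on the number of entries:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Sketch: report which link speeds support RS FEC on this port. */
static void
print_rs_fec_speeds(uint16_t port_id)
{
	struct rte_eth_fec_capa capa[8]; /* assumed bound on table size */
	int i, num;

	num = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
	for (i = 0; i < num; i++)
		if (capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
			printf("%u Mbps supports RS FEC\n", capa[i].speed);
}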
struct hns3_cmd_desc desc;
int ret;
- if ((vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER)) {
+ if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
return -EINVAL;
}
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
- } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
rte_spinlock_lock(&hw->lock);
rxmode = &dev->data->dev_conf.rxmode;
tmp_mask = (unsigned int)mask;
- if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
/* ignore vlan filter configuration during promiscuous mode */
if (!dev->data->promiscuous) {
/* Enable or disable VLAN filter */
- enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
+ enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
true : false;
ret = hns3_enable_vlan_filter(hns, enable);
}
}
- if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
+ enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
true : false;
ret = hns3_en_hw_strip_rxvtag(hns, enable);
return ret;
}
- ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
+ ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
if (ret) {
hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
if (!hw->data->promiscuous) {
/* restore vlan filter states */
offloads = hw->data->dev_conf.rxmode.offloads;
- enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
+ enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
ret = hns3_enable_vlan_filter(hns, enable);
if (ret) {
hns3_err(hw, "failed to restore vlan rx filter conf, "
txmode->hw_vlan_reject_untagged);
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
ret = hns3_vlan_offload_set(dev, mask);
if (ret) {
hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
int max_tc = 0;
int i;
- if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
- (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
- tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
+ if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
+ (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+ tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
rx_mq_mode, tx_mq_mode);
return -EOPNOTSUPP;
dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
- if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
if (dcb_rx_conf->nb_tcs > pf->tc_max) {
hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
dcb_rx_conf->nb_tcs, pf->tc_max);
if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
- hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+ hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
"nb_tcs(%d) != %d or %d in rx direction.",
dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
return -EINVAL;
* configure link_speeds (default 0), which means auto-negotiation.
* In this case, it should return success.
*/
- if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
+ if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
hw->mac.support_autoneg == 0)
return 0;
- if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
+ if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
ret = hns3_check_port_speed(hw, link_speeds);
if (ret)
return ret;
if (ret)
goto cfg_err;
- if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
ret = hns3_setup_dcb(dev);
if (ret)
goto cfg_err;
}
/* When RSS is not configured, redirect the packet queue 0 */
- if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
rss_conf = conf->rx_adv_conf.rss_conf;
hw->rss_dis_flag = false;
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
goto cfg_err;
/* config hardware GRO */
- gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
goto cfg_err;
uint32_t speed_capa = 0;
if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
- speed_capa |= ETH_LINK_SPEED_10M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
- speed_capa |= ETH_LINK_SPEED_10M;
+ speed_capa |= RTE_ETH_LINK_SPEED_10M;
if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
- speed_capa |= ETH_LINK_SPEED_100M_HD;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
- speed_capa |= ETH_LINK_SPEED_100M;
+ speed_capa |= RTE_ETH_LINK_SPEED_100M;
if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
return speed_capa;
}
uint32_t speed_capa = 0;
if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
- speed_capa |= ETH_LINK_SPEED_1G;
+ speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
- speed_capa |= ETH_LINK_SPEED_10G;
+ speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
- speed_capa |= ETH_LINK_SPEED_25G;
+ speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
- speed_capa |= ETH_LINK_SPEED_40G;
+ speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
- speed_capa |= ETH_LINK_SPEED_50G;
+ speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
- speed_capa |= ETH_LINK_SPEED_100G;
+ speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
- speed_capa |= ETH_LINK_SPEED_200G;
+ speed_capa |= RTE_ETH_LINK_SPEED_200G;
return speed_capa;
}
hns3_get_firber_port_speed_capa(mac->supported_speed);
if (mac->support_autoneg == 0)
- speed_capa |= ETH_LINK_SPEED_FIXED;
+ speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
return speed_capa;
}
info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
- info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_SCTP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_TCP_LRO);
- info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO);
+ info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
if (hns3_dev_get_support(hw, PTP))
- info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
ret = hns3_update_link_info(eth_dev);
if (ret)
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
return ret;
}
struct hns3_mac *mac = &hw->mac;
switch (mac->link_speed) {
- case ETH_SPEED_NUM_10M:
- case ETH_SPEED_NUM_100M:
- case ETH_SPEED_NUM_1G:
- case ETH_SPEED_NUM_10G:
- case ETH_SPEED_NUM_25G:
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_50G:
- case ETH_SPEED_NUM_100G:
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_200G:
if (mac->link_status)
new_link->link_speed = mac->link_speed;
break;
default:
if (mac->link_status)
- new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+ new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
}
if (!mac->link_status)
- new_link->link_speed = ETH_SPEED_NUM_NONE;
+ new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
new_link->link_duplex = mac->link_duplex;
- new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
new_link->link_autoneg = mac->link_autoneg;
}
if (eth_dev->data->dev_started == 0) {
new_link.link_autoneg = mac->link_autoneg;
new_link.link_duplex = mac->link_duplex;
- new_link.link_speed = ETH_SPEED_NUM_NONE;
- new_link.link_status = ETH_LINK_DOWN;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ new_link.link_status = RTE_ETH_LINK_DOWN;
goto out;
}
break;
}
- if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
+ if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
break;
rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
{
switch (speed_cmd) {
case HNS3_CFG_SPEED_10M:
- *speed = ETH_SPEED_NUM_10M;
+ *speed = RTE_ETH_SPEED_NUM_10M;
break;
case HNS3_CFG_SPEED_100M:
- *speed = ETH_SPEED_NUM_100M;
+ *speed = RTE_ETH_SPEED_NUM_100M;
break;
case HNS3_CFG_SPEED_1G:
- *speed = ETH_SPEED_NUM_1G;
+ *speed = RTE_ETH_SPEED_NUM_1G;
break;
case HNS3_CFG_SPEED_10G:
- *speed = ETH_SPEED_NUM_10G;
+ *speed = RTE_ETH_SPEED_NUM_10G;
break;
case HNS3_CFG_SPEED_25G:
- *speed = ETH_SPEED_NUM_25G;
+ *speed = RTE_ETH_SPEED_NUM_25G;
break;
case HNS3_CFG_SPEED_40G:
- *speed = ETH_SPEED_NUM_40G;
+ *speed = RTE_ETH_SPEED_NUM_40G;
break;
case HNS3_CFG_SPEED_50G:
- *speed = ETH_SPEED_NUM_50G;
+ *speed = RTE_ETH_SPEED_NUM_50G;
break;
case HNS3_CFG_SPEED_100G:
- *speed = ETH_SPEED_NUM_100G;
+ *speed = RTE_ETH_SPEED_NUM_100G;
break;
case HNS3_CFG_SPEED_200G:
- *speed = ETH_SPEED_NUM_200G;
+ *speed = RTE_ETH_SPEED_NUM_200G;
break;
default:
return -EINVAL;
hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
switch (speed) {
- case ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_10M:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
break;
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
break;
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_25G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
break;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
break;
- case ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_50G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
break;
- case ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_100G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
break;
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_200G:
hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
break;
int ret;
pf->support_sfp_query = true;
- mac->link_duplex = ETH_LINK_FULL_DUPLEX;
+ mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
if (ret) {
PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
return ret;
}
- mac->link_status = ETH_LINK_DOWN;
+ mac->link_status = RTE_ETH_LINK_DOWN;
return hns3_config_mtu(hw, pf->mps);
}
 * all packets coming in the receiving direction.
*/
offloads = dev->data->dev_conf.rxmode.offloads;
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
ret = hns3_enable_vlan_filter(hns, false);
if (ret) {
hns3_err(hw, "failed to enable promiscuous mode due to "
}
/* when promiscuous mode was disabled, restore the vlan filter status */
offloads = dev->data->dev_conf.rxmode.offloads;
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
ret = hns3_enable_vlan_filter(hns, true);
if (ret) {
hns3_err(hw, "failed to disable promiscuous mode due to"
mac_info->supported_speed =
rte_le_to_cpu_32(resp->supported_speed);
mac_info->support_autoneg = resp->autoneg_ability;
- mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
- : ETH_LINK_AUTONEG;
+ mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
+ : RTE_ETH_LINK_AUTONEG;
} else {
mac_info->query_type = HNS3_DEFAULT_QUERY;
}
static uint8_t
hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
{
- if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
- duplex = ETH_LINK_FULL_DUPLEX;
+ if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
+ duplex = RTE_ETH_LINK_FULL_DUPLEX;
return duplex;
}
return ret;
/* Do nothing if no SFP */
- if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
+ if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
return 0;
/*
/* Config full duplex for SFP */
return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
- ETH_LINK_FULL_DUPLEX);
+ RTE_ETH_LINK_FULL_DUPLEX);
}
static void
hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
/*
- * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
+ * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
* when receiving frames. Otherwise, CRC will be stripped.
*/
- if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
else
hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "get link status cmd failed %d", ret);
- return ETH_LINK_DOWN;
+ return RTE_ETH_LINK_DOWN;
}
req = (struct hns3_link_status_cmd *)desc.data;
struct hns3_mac *mac = &hw->mac;
switch (mac->link_speed) {
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
return HNS3_FIBER_LINK_SPEED_1G_BIT;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
return HNS3_FIBER_LINK_SPEED_10G_BIT;
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_25G:
return HNS3_FIBER_LINK_SPEED_25G_BIT;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
return HNS3_FIBER_LINK_SPEED_40G_BIT;
- case ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_50G:
return HNS3_FIBER_LINK_SPEED_50G_BIT;
- case ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_100G:
return HNS3_FIBER_LINK_SPEED_100G_BIT;
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_200G:
return HNS3_FIBER_LINK_SPEED_200G_BIT;
default:
hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
{
uint32_t speed_bit;
- switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_10M:
+ switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_10M:
speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
break;
- case ETH_LINK_SPEED_10M_HD:
+ case RTE_ETH_LINK_SPEED_10M_HD:
speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
break;
- case ETH_LINK_SPEED_100M:
+ case RTE_ETH_LINK_SPEED_100M:
speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
break;
- case ETH_LINK_SPEED_100M_HD:
+ case RTE_ETH_LINK_SPEED_100M_HD:
speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
break;
- case ETH_LINK_SPEED_1G:
+ case RTE_ETH_LINK_SPEED_1G:
speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
break;
default:
{
uint32_t speed_bit;
- switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
- case ETH_LINK_SPEED_1G:
+ switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+ case RTE_ETH_LINK_SPEED_1G:
speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
break;
- case ETH_LINK_SPEED_10G:
+ case RTE_ETH_LINK_SPEED_10G:
speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
break;
- case ETH_LINK_SPEED_25G:
+ case RTE_ETH_LINK_SPEED_25G:
speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
break;
- case ETH_LINK_SPEED_40G:
+ case RTE_ETH_LINK_SPEED_40G:
speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
break;
- case ETH_LINK_SPEED_50G:
+ case RTE_ETH_LINK_SPEED_50G:
speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
break;
- case ETH_LINK_SPEED_100G:
+ case RTE_ETH_LINK_SPEED_100G:
speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
break;
- case ETH_LINK_SPEED_200G:
+ case RTE_ETH_LINK_SPEED_200G:
speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
break;
default:
static inline uint32_t
hns3_get_link_speed(uint32_t link_speeds)
{
- uint32_t speed = ETH_SPEED_NUM_NONE;
-
- if (link_speeds & ETH_LINK_SPEED_10M ||
- link_speeds & ETH_LINK_SPEED_10M_HD)
- speed = ETH_SPEED_NUM_10M;
- if (link_speeds & ETH_LINK_SPEED_100M ||
- link_speeds & ETH_LINK_SPEED_100M_HD)
- speed = ETH_SPEED_NUM_100M;
- if (link_speeds & ETH_LINK_SPEED_1G)
- speed = ETH_SPEED_NUM_1G;
- if (link_speeds & ETH_LINK_SPEED_10G)
- speed = ETH_SPEED_NUM_10G;
- if (link_speeds & ETH_LINK_SPEED_25G)
- speed = ETH_SPEED_NUM_25G;
- if (link_speeds & ETH_LINK_SPEED_40G)
- speed = ETH_SPEED_NUM_40G;
- if (link_speeds & ETH_LINK_SPEED_50G)
- speed = ETH_SPEED_NUM_50G;
- if (link_speeds & ETH_LINK_SPEED_100G)
- speed = ETH_SPEED_NUM_100G;
- if (link_speeds & ETH_LINK_SPEED_200G)
- speed = ETH_SPEED_NUM_200G;
+ uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
+
+ if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
+ link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
+ speed = RTE_ETH_SPEED_NUM_10M;
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
+ link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
+ speed = RTE_ETH_SPEED_NUM_100M;
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+ speed = RTE_ETH_SPEED_NUM_1G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
+ speed = RTE_ETH_SPEED_NUM_10G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_25G)
+ speed = RTE_ETH_SPEED_NUM_25G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+ speed = RTE_ETH_SPEED_NUM_40G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_50G)
+ speed = RTE_ETH_SPEED_NUM_50G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_100G)
+ speed = RTE_ETH_SPEED_NUM_100G;
+ if (link_speeds & RTE_ETH_LINK_SPEED_200G)
+ speed = RTE_ETH_SPEED_NUM_200G;
return speed;
}
static uint8_t
hns3_get_link_duplex(uint32_t link_speeds)
{
- if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
- (link_speeds & ETH_LINK_SPEED_100M_HD))
- return ETH_LINK_HALF_DUPLEX;
+ if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+ (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+ return RTE_ETH_LINK_HALF_DUPLEX;
else
- return ETH_LINK_FULL_DUPLEX;
+ return RTE_ETH_LINK_FULL_DUPLEX;
}
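From the application's point of view, the inverse operation is building the link_speeds mask in struct rte_eth_conf. A minimal sketch requesting a fixed 10 Gbps link (the speed is chosen arbitrarily; leaving link_speeds at RTE_ETH_LINK_SPEED_AUTONEG keeps autonegotiation):

#include <rte_ethdev.h>

/* Sketch: request a fixed 10G link instead of autonegotiation. */
static void
request_fixed_10g(struct rte_eth_conf *conf)
{
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
}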
static int
struct hns3_set_link_speed_cfg cfg;
memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
- cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
- ETH_LINK_AUTONEG : ETH_LINK_FIXED;
- if (cfg.autoneg != ETH_LINK_AUTONEG) {
+ cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
+ RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+ if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
cfg.speed = hns3_get_link_speed(conf->link_speeds);
cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
}
ret = hns3_cfg_mac_mode(hw, false);
if (ret)
return ret;
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3_configure_all_mac_addr(hns, true);
current_mode = hns3_get_current_fc_mode(dev);
switch (current_mode) {
case HNS3_FC_FULL:
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
case HNS3_FC_TX_PAUSE:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case HNS3_FC_RX_PAUSE:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case HNS3_FC_NONE:
default:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
break;
}
int i;
rte_spinlock_lock(&hw->lock);
- if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = pf->local_max_tc;
else
dcb_info->nb_tcs = 1;
struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
if (hw->adapter_state == HNS3_NIC_STARTED) {
rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
hns3_update_linkstatus_and_event(hw, false);
* in device of link speed
* below 10 Gbps.
*/
- if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
+ if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
*state = 0;
return 0;
}
* configured FEC mode is returned.
* If link is up, current FEC mode is returned.
*/
- if (hw->mac.link_status == ETH_LINK_DOWN) {
+ if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
ret = get_current_fec_auto_state(hw, &auto_state);
if (ret)
return ret;
uint32_t cur_capa;
switch (mac->link_speed) {
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
cur_capa = fec_capa[1].capa;
break;
- case ETH_SPEED_NUM_25G:
- case ETH_SPEED_NUM_100G:
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_200G:
cur_capa = fec_capa[0].capa;
break;
default:
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
uint8_t media_type;
uint8_t phy_addr;
- uint8_t link_duplex : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
- uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
- uint8_t link_status : 1; /* ETH_LINK_[DOWN/UP] */
- uint32_t link_speed; /* ETH_SPEED_NUM_ */
+ uint8_t link_duplex : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+ uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
+ uint8_t link_status : 1; /* RTE_ETH_LINK_[DOWN/UP] */
+ uint32_t link_speed; /* RTE_ETH_SPEED_NUM_ */
/*
* Some firmware versions support only the SFP speed query. In addition
* to the SFP speed query, some firmware supports the query of the speed
hns3_txvlan_cap_get(struct hns3_hw *hw)
{
if (hw->port_base_vlan_cfg.state)
- return DEV_TX_OFFLOAD_VLAN_INSERT;
+ return RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
else
- return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
+ return RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
}
#endif /* _HNS3_ETHDEV_H_ */
}
hw->adapter_state = HNS3_NIC_CONFIGURING;
- if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
hns3_err(hw, "setting link speed/duplex not supported");
ret = -EINVAL;
goto cfg_err;
}
/* When RSS is not configured, redirect the packet queue 0 */
- if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
hw->rss_dis_flag = false;
rss_conf = conf->rx_adv_conf.rss_conf;
ret = hns3_dev_rss_hash_update(dev, &rss_conf);
goto cfg_err;
/* config hardware GRO */
- gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
goto cfg_err;
info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
- info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_SCTP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_TCP_LRO);
- info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO);
+ info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
if (hns3_dev_get_support(hw, INDEP_TXRX))
info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
tmp_mask = (unsigned int)mask;
- if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
rte_spinlock_lock(&hw->lock);
/* Enable or disable VLAN filter */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = hns3vf_en_vlan_filter(hw, true);
else
ret = hns3vf_en_vlan_filter(hw, false);
}
/* Vlan stripping setting */
- if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+ if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
rte_spinlock_lock(&hw->lock);
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
ret = hns3vf_en_hw_strip_rxvtag(hw, true);
else
ret = hns3vf_en_hw_strip_rxvtag(hw, false);
int ret;
dev_conf = &hw->data->dev_conf;
- en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+ en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
: false;
ret = hns3vf_en_hw_strip_rxvtag(hw, en);
if (ret)
}
/* Apply vlan offload setting */
- ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK);
+ ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK);
if (ret)
hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
struct hns3_hw *hw = &hns->hw;
int ret;
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
/*
* The "hns3vf_do_stop" function will also be called by .stop_service to
memset(&new_link, 0, sizeof(new_link));
switch (mac->link_speed) {
- case ETH_SPEED_NUM_10M:
- case ETH_SPEED_NUM_100M:
- case ETH_SPEED_NUM_1G:
- case ETH_SPEED_NUM_10G:
- case ETH_SPEED_NUM_25G:
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_50G:
- case ETH_SPEED_NUM_100G:
- case ETH_SPEED_NUM_200G:
+ case RTE_ETH_SPEED_NUM_10M:
+ case RTE_ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_50G:
+ case RTE_ETH_SPEED_NUM_100G:
+ case RTE_ETH_SPEED_NUM_200G:
if (mac->link_status)
new_link.link_speed = mac->link_speed;
break;
default:
if (mac->link_status)
- new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
}
if (!mac->link_status)
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
new_link.link_duplex = mac->link_duplex;
- new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
new_link.link_autoneg =
- !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+ !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(eth_dev, &new_link);
}
 * Make sure to update the link status before hns3vf_stop_poll_job
 * because updating the link status depends on the polling job existing.
*/
- hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+ hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
hw->mac.link_duplex);
hns3vf_stop_poll_job(eth_dev);
}
- hw->mac.link_status = ETH_LINK_DOWN;
+ hw->mac.link_status = RTE_ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
rte_wmb();
* Kunpeng930 and future kunpeng series support to use src/dst port
* fields to RSS hash for IPv6 SCTP packet type.
*/
- if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
- (rss->types & ETH_RSS_IP ||
+ if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
+ (rss->types & RTE_ETH_RSS_IP ||
(!hw->rss_info.ipv6_sctp_offload_supported &&
- rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+ rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
return false;
return true;
struct hns3_hw *hw = &hns->hw;
int ret;
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
return 0;
ret = rte_mbuf_dyn_rx_timestamp_register
uint64_t rss_types;
uint64_t rss_field;
} hns3_set_tuple_table[] = {
- { ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
- { ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
- { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
- { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
- { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
- { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
- { ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
- { ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY,
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) },
- { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY,
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) },
- { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
- { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY,
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
};
uint64_t rss_types;
uint64_t rss_field;
} hns3_set_rss_types[] = {
- { ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
+ { RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
- { ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
- { ETH_RSS_NONFRAG_IPV4_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
- { ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
+ { RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
- { ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
- { ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
- { ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) |
BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
- { ETH_RSS_NONFRAG_IPV6_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }
};
* When the user does not specify any of the following types, or a
* combination of them, all fields are enabled for the supported RSS
* types. The types in question are:
- * - ETH_RSS_L3_SRC_ONLY
- * - ETH_RSS_L3_DST_ONLY
- * - ETH_RSS_L4_SRC_ONLY
- * - ETH_RSS_L4_DST_ONLY
+ * - RTE_ETH_RSS_L3_SRC_ONLY
+ * - RTE_ETH_RSS_L3_DST_ONLY
+ * - RTE_ETH_RSS_L4_SRC_ONLY
+ * - RTE_ETH_RSS_L4_DST_ONLY
*/
if (fields_count == 0) {
for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) {
memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl,
sizeof(rss_cfg->rss_indirection_tbl));
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
rte_spinlock_unlock(&hw->lock);
hns3_err(hw, "queue id(%u) set to redirection table "
}
rte_spinlock_lock(&hw->lock);
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] =
rss_cfg->rss_indirection_tbl[i];
}
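
For reference, this is roughly how an application walks the redirection table that the query callback above fills in, using the renamed RTE_ETH_RETA_GROUP_SIZE constant. A minimal sketch, assuming reta_size does not exceed RTE_ETH_RSS_RETA_SIZE_512; the helper name dump_reta is purely illustrative:

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static int
dump_reta(uint16_t port_id)
{
	/* Enough groups for up to 512 RETA entries (64 entries per group). */
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						   RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info info;
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	/* Select every entry the port actually exposes. */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < info.reta_size; i++)
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, info.reta_size);
	if (ret != 0)
		return ret;

	for (i = 0; i < info.reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		printf("reta[%u] -> queue %u\n", i, reta_conf[idx].reta[shift]);
	}
	return 0;
}
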
/* When RSS is off, redirect all packets to queue 0 */
- if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0)
+ if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0)
hns3_rss_uninit(hns);
/* Configure RSS hash algorithm and hash key offset */
* When RSS is off, there is no need to configure the RSS redirection
* table in hardware.
*/
- if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+ if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl,
hw->rss_ind_tbl_size);
if (ret)
return ret;
rss_indir_table_uninit:
- if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) {
+ if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
ret1 = hns3_rss_reset_indir_table(hw);
if (ret1 != 0)
return ret;
#include <rte_flow.h>
#define HNS3_ETH_RSS_SUPPORT ( \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY)
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY)
#define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */
#define HNS3_RSS_IND_TBL_SIZE_MAX 2048
memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->rx_buf_len);
}
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
dev->data->scattered_rx = true;
}
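
On the application side, the corresponding setup can request scattered Rx explicitly instead of relying on the driver to infer it from the MTU. A minimal sketch, assuming the port advertises the capability; the helper name is illustrative:

#include <rte_ethdev.h>

static int
request_scatter(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	/* Only request the offload when the PMD reports it. */
	if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
	return 0;
}
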
vec_allowed = vec_support && hns3_get_default_vec_support();
sve_allowed = vec_support && hns3_get_sve_support();
simple_allowed = !dev->data->scattered_rx &&
- (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
+ (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
return hns3_recv_pkts_vec;
int ret;
offloads = hw->data->dev_conf.rxmode.offloads;
- gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+ gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
ret = hns3_config_gro(hw, gro_en);
if (ret)
hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
if (hns3_dev_get_support(hw, PTP))
return false;
- return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+ return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
}
static bool
return true;
#else
#define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | \
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
uint16_t rx_rearm_nb; /* number of remaining BDs to be re-armed */
- /* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+ /* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
uint8_t crc_len;
/*
if (hns3_dev_get_support(hw, PTP))
return -ENOTSUP;
- /* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
- if (txmode->offloads != DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ /* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
+ if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
return -ENOTSUP;
return 0;
int
hns3_rx_check_vec_support(struct rte_eth_dev *dev)
{
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- uint64_t offloads_mask = DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_VLAN;
+ uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_VLAN;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hns3_dev_get_support(hw, PTP))
/* Set the global registers with default ether type value */
if (!pf->support_multi_driver) {
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
PMD_INIT_LOG(ERR,
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Only legacy filter API needs the following fdir config. So when the
* legacy filter API is deprecated, the following codes should also be
* number, which will be available after rx_queue_setup(). dev_start()
* function is good to place RSS setup.
*/
- if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
ret = i40e_vmdq_setup(dev);
if (ret)
goto err;
}
- if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
ret = i40e_dcb_setup(dev);
if (ret) {
PMD_DRV_LOG(ERR, "failed to configure DCB.");
{
uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
- if (link_speeds & ETH_LINK_SPEED_40G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
link_speed |= I40E_LINK_SPEED_40GB;
- if (link_speeds & ETH_LINK_SPEED_25G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_25G)
link_speed |= I40E_LINK_SPEED_25GB;
- if (link_speeds & ETH_LINK_SPEED_20G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_20G)
link_speed |= I40E_LINK_SPEED_20GB;
- if (link_speeds & ETH_LINK_SPEED_10G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
link_speed |= I40E_LINK_SPEED_10GB;
- if (link_speeds & ETH_LINK_SPEED_1G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
link_speed |= I40E_LINK_SPEED_1GB;
- if (link_speeds & ETH_LINK_SPEED_100M)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M)
link_speed |= I40E_LINK_SPEED_100MB;
return link_speed;
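
The bitmap consumed by this conversion comes from dev_conf.link_speeds on the application side. A minimal sketch, assuming the chosen speed is present in dev_info.speed_capa:

#include <rte_ethdev.h>

static void
request_fixed_10g(struct rte_eth_conf *conf)
{
	/* RTE_ETH_LINK_SPEED_FIXED plus exactly one speed bit disables
	 * autonegotiation; leaving link_speeds as
	 * RTE_ETH_LINK_SPEED_AUTONEG (0) keeps autoneg enabled. */
	conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
}
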
abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
I40E_AQ_PHY_LINK_ENABLED;
- if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
- conf->link_speeds = ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_100M;
+ if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_20G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_100M;
abilities |= I40E_AQ_PHY_AN_ENABLED;
} else {
/* Parse the link status */
switch (link_speed) {
case I40E_REG_SPEED_0:
- link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case I40E_REG_SPEED_1:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case I40E_REG_SPEED_2:
if (hw->mac.type == I40E_MAC_X722)
- link->link_speed = ETH_SPEED_NUM_2_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
else
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case I40E_REG_SPEED_3:
if (hw->mac.type == I40E_MAC_X722) {
- link->link_speed = ETH_SPEED_NUM_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_5G;
} else {
reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
if (reg_val & I40E_REG_MACC_25GB)
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
else
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
}
break;
case I40E_REG_SPEED_4:
if (hw->mac.type == I40E_MAC_X722)
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
else
- link->link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = RTE_ETH_SPEED_NUM_20G;
break;
default:
PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
if (unlikely(status != I40E_SUCCESS)) {
- link->link_speed = ETH_SPEED_NUM_NONE;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
return;
}
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link->link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
break;
default:
if (link->link_status)
- link->link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
else
- link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
}
memset(&link, 0, sizeof(link));
/* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
if (!wait_to_complete && !enable_lse)
update_link_reg(hw, &link);
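
The values filled in above surface to applications through rte_eth_link_get_nowait(); a minimal sketch reading them back with the renamed constants:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
	else
		printf("port %u: down\n", port_id);
}
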
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
-
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
dev_info->tx_queue_offload_capa;
dev_info->dev_capa =
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
- dev_info->speed_capa = ETH_LINK_SPEED_40G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
dev_info->default_rxportconf.nb_queues = 2;
dev_info->default_txportconf.nb_queues = 2;
if (dev->data->nb_rx_queues == 1)
} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
- dev_info->speed_capa = ETH_LINK_SPEED_25G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = 256;
dev_info->default_txportconf.ring_size = 256;
} else {
/* For X710 */
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
dev_info->default_rxportconf.ring_size = 512;
dev_info->default_txportconf.ring_size = 256;
} else {
int ret;
if (qinq) {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
reg_id = 2;
}
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
- if ((vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER) ||
- (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
/* 802.1ad frames ability is added in NVM API 1.7*/
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
if (qinq) {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->first_tag = rte_cpu_to_le_16(tpid);
- else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
hw->second_tag = rte_cpu_to_le_16(tpid);
} else {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->second_tag = rte_cpu_to_le_16(tpid);
}
ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
- i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
- i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
}
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
- if (mask & ETH_QINQ_STRIP_MASK) {
+ if (mask & RTE_ETH_QINQ_STRIP_MASK) {
/* Enable or disable outer VLAN stripping */
- if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
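
At the ethdev level the per-feature masks handled above are typically produced by rte_eth_dev_set_vlan_offload(), which compares the requested state with the current one and then invokes a driver vlan_offload_set callback such as this. A minimal runtime toggle, sketched with the renamed *_OFFLOAD flags (helper name illustrative):

#include <rte_ethdev.h>

static int
set_vlan_strip(uint16_t port_id, int on)
{
	int mode = rte_eth_dev_get_vlan_offload(port_id);

	if (mode < 0)
		return mode;
	if (on)
		mode |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	else
		mode &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
	/* Only the bits that changed are passed down to the PMD. */
	return rte_eth_dev_set_vlan_offload(port_id, mode);
}
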
/* Return current mode according to actual setting */
switch (hw->fc.current_mode) {
case I40E_FC_FULL:
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
case I40E_FC_TX_PAUSE:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case I40E_FC_RX_PAUSE:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case I40E_FC_NONE:
default:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
};
return 0;
struct i40e_hw *hw;
struct i40e_pf *pf;
enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
- [RTE_FC_NONE] = I40E_FC_NONE,
- [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
- [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
- [RTE_FC_FULL] = I40E_FC_FULL
+ [RTE_ETH_FC_NONE] = I40E_FC_NONE,
+ [RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+ [RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+ [RTE_ETH_FC_FULL] = I40E_FC_FULL
};
/* The high_water field in rte_eth_fc_conf is expressed in kilobytes */
}
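
For completeness, the application-side counterpart that feeds this mapping; a minimal sketch that switches an already-started port to full flow control while keeping the driver-reported thresholds:

#include <rte_ethdev.h>

static int
enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
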
rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
int ret;
if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
if (ret)
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
int ret;
if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
if (ret)
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = lut[i];
}
pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
hw->func_caps.num_vsis - vsi_count);
pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
- ETH_64_POOLS);
+ RTE_ETH_64_POOLS);
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
int mask = 0;
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK |
- ETH_QINQ_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = i40e_vlan_offload_set(dev, mask);
if (ret) {
PMD_DRV_LOG(INFO, "Failed to update vlan offload");
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
- if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+ if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
- else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+ else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX)
return 0;
vid_idx = I40E_VFTA_IDX(vlan_id);
struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
int ret;
- if (vlan_id > ETH_VLAN_ID_MAX)
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX)
return;
i40e_store_vlan_filter(vsi, vlan_id, on);
i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
{
switch (filter_type) {
- case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
break;
- case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
break;
- case RTE_TUNNEL_FILTER_IMAC_TENID:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
break;
- case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+ case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
break;
- case ETH_TUNNEL_FILTER_IMAC:
+ case RTE_ETH_TUNNEL_FILTER_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
break;
- case ETH_TUNNEL_FILTER_OIP:
+ case RTE_ETH_TUNNEL_FILTER_OIP:
*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
break;
- case ETH_TUNNEL_FILTER_IIP:
+ case RTE_ETH_TUNNEL_FILTER_IIP:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
break;
default:
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
I40E_AQC_TUNNEL_TYPE_VXLAN);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -1;
break;
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -1;
break;
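
The prot_type values handled in these switches come from rte_eth_dev_udp_tunnel_port_add(); a minimal sketch registering the standard VXLAN UDP port (4789):

#include <rte_ethdev.h>

static int
add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
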
i40e_pf_reset_rss_reta(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->adapter->hw;
- uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
uint32_t i;
int num;
* configured. It's necessary to calculate the actual PF
* queues that are configured.
*/
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
else
num = pf->dev_data->nb_rx_queues;
rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
if (!(rss_hf & pf->adapter->flow_types_mask) ||
- !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+ !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
return 0;
hw = I40E_PF_TO_HW(pf);
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_25G:
tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
break;
else
*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
- if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
dcb_cfg->pfc.willing = 0;
dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
dcb_cfg->pfc.pfcenable = *tc_map;
uint16_t bsf, tc_mapping;
int i, j = 0;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
else
dcb_info->nb_tcs = 1;
dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
}
j++;
- } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+ } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
return 0;
}
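
A minimal application-side sketch reading back the mapping built above via the generic DCB query (helper name illustrative, error handling trimmed):

#include <stdio.h>
#include <rte_ethdev.h>

static int
show_nb_tcs(uint16_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	int ret;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret != 0)
		return ret;
	printf("port %u: %u traffic classes\n", port_id, dcb_info.nb_tcs);
	return 0;
}
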
I40E_FLAG_RSS_AQ_CAPABLE)
#define I40E_RSS_OFFLOAD_ALL ( \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L2_PAYLOAD)
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L2_PAYLOAD)
/* All bits of RSS hash enable for X722 */
#define I40E_RSS_HENA_ALL_X722 ( \
uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t)]; /**< Hash key. */
- uint16_t queue[ETH_RSS_RETA_SIZE_512]; /**< Queues indices to use. */
+ uint16_t queue[RTE_ETH_RSS_RETA_SIZE_512]; /**< Queues indices to use. */
bool symmetric_enable; /**< true, if enable symmetric */
uint64_t config_pctypes; /**< All PCTYPES with the flow */
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
}
static uint16_t i40e_supported_tunnel_filter_types[] = {
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
- ETH_TUNNEL_FILTER_IVLAN,
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
- ETH_TUNNEL_FILTER_IMAC,
- ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+ RTE_ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
+ RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
+ RTE_ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_IMAC,
};
static int
rte_memcpy(&filter->outer_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
}
}
break;
filter->inner_vlan =
rte_be_to_cpu_16(vlan_spec->tci) &
I40E_VLAN_TCI_MASK;
- filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
vxlan_spec->vni, 3);
filter->tenant_id =
rte_be_to_cpu_32(tenant_id_be);
- filter_type |= ETH_TUNNEL_FILTER_TENID;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
}
vxlan_flag = 1;
rte_memcpy(&filter->outer_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
ð_spec->dst,
RTE_ETHER_ADDR_LEN);
- filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
}
}
filter->inner_vlan =
rte_be_to_cpu_16(vlan_spec->tci) &
I40E_VLAN_TCI_MASK;
- filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
nvgre_spec->tni, 3);
filter->tenant_id =
rte_be_to_cpu_32(tenant_id_be);
- filter_type |= ETH_TUNNEL_FILTER_TENID;
+ filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
}
nvgre_flag = 1;
const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
/* IPv4 */
- { ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
- { ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+ { RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+ { RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
- { ETH_RSS_NONFRAG_IPV4_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
- { ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
/* IPv6 */
- { ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
- { ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+ { RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+ { RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
- { ETH_RSS_NONFRAG_IPV6_OTHER,
+ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
- { ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
- { ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
/* Port */
- { ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+ { RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
/* Ether */
- { ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
- { ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+ { RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+ { RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
/* VLAN */
- { ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
- { ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+ { RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+ { RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
};
#define I40E_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
#define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
pattern, rss_mask, true, cus_pctype }
-#define I40E_HASH_L2_RSS_MASK (ETH_RSS_VLAN | ETH_RSS_ETH | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY)
+#define I40E_HASH_L2_RSS_MASK (RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
+ RTE_ETH_RSS_L2_SRC_ONLY | \
+ RTE_ETH_RSS_L2_DST_ONLY)
#define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK | \
- ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY)
+ RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY)
-#define I40E_HASH_IPV4_L23_RSS_MASK (ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
-#define I40E_HASH_IPV6_L23_RSS_MASK (ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV4_L23_RSS_MASK (RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK (RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
#define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK | \
- ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY)
+ RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY)
-#define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
-#define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+#define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
-#define I40E_HASH_L4_TYPES (ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define I40E_HASH_L4_TYPES (RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
/* Current supported patterns and RSS types.
* All items that have the same pattern types are together.
static const struct i40e_hash_match_pattern match_patterns[] = {
/* Ether */
I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
- ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+ RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
I40E_FILTER_PCTYPE_L2_PAYLOAD),
/* IPv4 */
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
- ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+ RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
I40E_FILTER_PCTYPE_FRAG_IPV4),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
- ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
I40E_HASH_IPV4_L23_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
- ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
- ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
- ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
/* IPv6 */
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
- ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+ RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
I40E_FILTER_PCTYPE_FRAG_IPV6),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
- ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
I40E_HASH_IPV6_L23_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_FRAG,
- ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
+ RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
I40E_FILTER_PCTYPE_FRAG_IPV6),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
- ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
- ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
- ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
/* ESP */
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
- ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+ RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
/* GTPC */
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
I40E_HASH_IPV4_L234_RSS_MASK,
I40E_CUSTOMIZED_GTPU),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
I40E_HASH_IPV6_L234_RSS_MASK,
I40E_CUSTOMIZED_GTPU),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
- ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+ RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
/* L2TPV3 */
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
- ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+ RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
- ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+ RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
/* AH */
- I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
I40E_CUSTOMIZED_AH_IPV4),
- I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
I40E_CUSTOMIZED_AH_IPV6),
};
/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
* it is treated the same as if neither were specified.
*/
- mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
- if (mask == ETH_RSS_L2_SRC_ONLY)
+ mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
+ if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
inset &= ~I40E_INSET_DMAC;
- else if (mask == ETH_RSS_L2_DST_ONLY)
+ else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
inset &= ~I40E_INSET_SMAC;
- mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
- if (mask == ETH_RSS_L3_SRC_ONLY)
+ mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
+ if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
- else if (mask == ETH_RSS_L3_DST_ONLY)
+ else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
- mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
- if (mask == ETH_RSS_L4_SRC_ONLY)
+ mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
+ if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
inset &= ~I40E_INSET_DST_PORT;
- else if (mask == ETH_RSS_L4_DST_ONLY)
+ else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
inset &= ~I40E_INSET_SRC_PORT;
if (rss_types & I40E_HASH_L4_TYPES) {
uint64_t l3_mask = rss_types &
- (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+ (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
uint64_t l4_mask = rss_types &
- (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+ (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
if (l3_mask && !l4_mask)
inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
/* Update lookup table */
if (rss_info->queue_num > 0) {
- uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
uint32_t i, j = 0;
for (i = 0; i < hw->func_caps.rss_table_size; i++) {
"RSS key is ignored when queues specified");
pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
max_queue = i40e_pf_calc_configured_queues_num(pf);
else
max_queue = pf->dev_data->nb_rx_queues;
uint64_t type, mask;
/* Validate L2 */
- type = ETH_RSS_ETH & rss_types;
- mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+ type = RTE_ETH_RSS_ETH & rss_types;
+ mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
if (!type && mask)
return false;
/* Validate L3 */
- type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
- mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+ type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+ mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
if (!type && mask)
return false;
/* Validate L4 */
- type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
- mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+ type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
+ mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
if (!type && mask)
return false;
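
An RSS action that satisfies the L2/L3/L4 validation above, as a minimal sketch (queue list and hash function are illustrative; the flow attributes, pattern and rte_flow_create() call are omitted): hash TCP/IPv4 traffic on the IPv4 source address only.

#include <rte_common.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	/* NONFRAG_IPV4_TCP carries L3 and L4 info, so L3_SRC_ONLY passes. */
	.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY,
	.queue = rss_queues,
	.queue_num = RTE_DIM(rss_queues),
};
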
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
+ /* need to convert the RTE_ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
switch (dev->data->dev_link.link_speed) {
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
break;
- case ETH_SPEED_NUM_20G:
+ case RTE_ETH_SPEED_NUM_20G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
break;
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_25G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
break;
- case ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_40G:
event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
default:
for (i = 0; i < tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
if (k) {
for (j = 0; j != k; j += RTE_I40E_TX_MAX_FREE_BUF_SZ) {
for (i = 0; i < RTE_I40E_TX_MAX_FREE_BUF_SZ; ++i, ++txep) {
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
}
/* check simple tx conflict */
if (ad->tx_simple_allowed) {
- if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+ if ((txq->offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
PMD_DRV_LOG(ERR, "No-simple tx is required.");
return -EINVAL;
/* Use a simple Tx queue if possible (only fast free is allowed) */
ad->tx_simple_allowed =
(txq->offloads ==
- (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
ad->tx_vec_allowed = (ad->tx_simple_allowed &&
txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
- uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
const struct rte_memzone *mz;
};
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
- uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
const struct rte_memzone *mz;
};
txep = (void *)txq->sw_ring;
txep += txq->tx_next_dd - (n - 1);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
void **cache_objs;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
*/
txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < n; i++) {
free[i] = txep[i].mbuf;
txep[i].mbuf = NULL;
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
struct i40e_rx_queue *rxq;
uint16_t desc, i;
bool first_queue;
return -1;
/* no header split support */
- if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
return -1;
/* no QinQ support */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
/**
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
return -EINVAL;
}
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering offload */
if (ethdev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
return i40e_vsi_config_vlan_filter(vsi, TRUE);
else
return i40e_vsi_config_vlan_filter(vsi, FALSE);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping offload */
if (ethdev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
return i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
return i40e_vsi_config_vlan_stripping(vsi, FALSE);
VIRTCHNL_VF_OFFLOAD_RX_POLLING)
#define IAVF_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
static const uint64_t map_hena_rss[] = {
/* IPv4 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
- ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
- ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
- ETH_RSS_NONFRAG_IPV4_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
- ETH_RSS_NONFRAG_IPV4_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
/* IPv6 */
[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
- ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
- ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
- ETH_RSS_NONFRAG_IPV6_SCTP,
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
- ETH_RSS_NONFRAG_IPV6_OTHER,
- [IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
/* L2 Payload */
- [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+ [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
};
- const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV4_OTHER |
- ETH_RSS_FRAG_IPV4;
+ const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_FRAG_IPV4;
- const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_FRAG_IPV6;
+ const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_FRAG_IPV6;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
}
/**
- * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+ * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
* generalizations of all other IPv4 and IPv6 RSS types.
*/
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
rss_hf |= ipv4_rss;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
rss_hf |= ipv6_rss;
RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
}
if (valid_rss_hf & ipv4_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
if (valid_rss_hf & ipv6_rss)
- valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
if (rss_hf & ~valid_rss_hf)
PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
return 0;
enable = !!(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_VLAN_INSERT);
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
iavf_config_vlan_insert_v2(adapter, enable);
return 0;
int err;
err = iavf_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK |
- ETH_QINQ_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Failed to update vlan offload");
return err;
ad->rx_vec_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Large VF setting */
if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
}
rxq->max_pkt_len = max_pkt_len;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
rxq->max_pkt_len > buf_size) {
dev_data->scattered_rx = 1;
}
dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
*/
switch (vf->link_speed) {
case 10:
- new_link.link_speed = ETH_SPEED_NUM_10M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case 100:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case 1000:
- new_link.link_speed = ETH_SPEED_NUM_1G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case 10000:
- new_link.link_speed = ETH_SPEED_NUM_10G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 20000:
- new_link.link_speed = ETH_SPEED_NUM_20G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case 25000:
- new_link.link_speed = ETH_SPEED_NUM_25G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- new_link.link_speed = ETH_SPEED_NUM_40G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- new_link.link_speed = ETH_SPEED_NUM_50G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- new_link.link_speed = ETH_SPEED_NUM_100G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
bool enable;
int err;
- if (mask & ETH_VLAN_FILTER_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
iavf_iterate_vlan_filters_v2(dev, enable);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
- enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
err = iavf_config_vlan_strip_v2(adapter, enable);
/* If not support, the stripping is already disabled by PF */
return -ENOTSUP;
/* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
err = iavf_enable_vlan_strip(adapter);
else
err = iavf_disable_vlan_strip(adapter);
rte_memcpy(lut, vf->rss_lut, reta_size);
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = vf->rss_lut[i];
}
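/*
 * Illustrative sketch, not part of this patch: the application-side
 * counterpart of the idx/shift arithmetic above. Each
 * rte_eth_rss_reta_entry64 covers RTE_ETH_RETA_GROUP_SIZE (64) entries and
 * its mask selects which of them are valid. The example_* name and the
 * round-robin queue assignment are assumptions for illustration only.
 */
#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	if (nb_rxq == 0 || reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		/* Spread table entries round-robin over the Rx queues. */
		reta_conf[idx].reta[shift] = i % nb_rxq;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}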
ret = iavf_query_stats(adapter, &pstats);
if (ret == 0) {
uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
RTE_ETHER_CRC_LEN;
iavf_update_stats(vsi, pstats);
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
/* rss type super set */
/* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4 (ETH_RSS_ETH | ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_IPV4_CHKSUM)
+#define IAVF_RSS_TYPE_OUTER_IPV4 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_IPV4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV4_UDP (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV4_TCP (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV4_SCTP (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
/* IPv6 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV6 (ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
#define IAVF_RSS_TYPE_OUTER_IPV6_FRAG (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_FRAG_IPV6)
+ RTE_ETH_RSS_FRAG_IPV6)
#define IAVF_RSS_TYPE_OUTER_IPV6_UDP (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV6_TCP (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define IAVF_RSS_TYPE_OUTER_IPV6_SCTP (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
/* VLAN IPV4 */
#define IAVF_RSS_TYPE_VLAN_IPV4 (IAVF_RSS_TYPE_OUTER_IPV4 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV4_UDP (IAVF_RSS_TYPE_OUTER_IPV4_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV4_TCP (IAVF_RSS_TYPE_OUTER_IPV4_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV4_SCTP (IAVF_RSS_TYPE_OUTER_IPV4_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* VLAN IPv6 */
#define IAVF_RSS_TYPE_VLAN_IPV6 (IAVF_RSS_TYPE_OUTER_IPV6 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_FRAG (IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_UDP (IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_TCP (IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define IAVF_RSS_TYPE_VLAN_IPV6_SCTP (IAVF_RSS_TYPE_OUTER_IPV6_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* IPv4 inner */
-#define IAVF_RSS_TYPE_INNER_IPV4 ETH_RSS_IPV4
-#define IAVF_RSS_TYPE_INNER_IPV4_UDP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV4_TCP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV4_SCTP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV4 RTE_ETH_RSS_IPV4
+#define IAVF_RSS_TYPE_INNER_IPV4_UDP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV4_TCP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV4_SCTP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
/* IPv6 inner */
-#define IAVF_RSS_TYPE_INNER_IPV6 ETH_RSS_IPV6
-#define IAVF_RSS_TYPE_INNER_IPV6_UDP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP)
-#define IAVF_RSS_TYPE_INNER_IPV6_TCP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP)
-#define IAVF_RSS_TYPE_INNER_IPV6_SCTP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define IAVF_RSS_TYPE_INNER_IPV6 RTE_ETH_RSS_IPV6
+#define IAVF_RSS_TYPE_INNER_IPV6_UDP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define IAVF_RSS_TYPE_INNER_IPV6_TCP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define IAVF_RSS_TYPE_INNER_IPV6_SCTP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
/* GTPU IPv4 */
#define IAVF_RSS_TYPE_GTPU_IPV4 (IAVF_RSS_TYPE_INNER_IPV4 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV4_UDP (IAVF_RSS_TYPE_INNER_IPV4_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV4_TCP (IAVF_RSS_TYPE_INNER_IPV4_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* GTPU IPv6 */
#define IAVF_RSS_TYPE_GTPU_IPV6 (IAVF_RSS_TYPE_INNER_IPV6 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV6_UDP (IAVF_RSS_TYPE_INNER_IPV6_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define IAVF_RSS_TYPE_GTPU_IPV6_TCP (IAVF_RSS_TYPE_INNER_IPV6_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* ESP, AH, L2TPV3 and PFCP */
-#define IAVF_RSS_TYPE_IPV4_ESP (ETH_RSS_ESP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV4_AH (ETH_RSS_AH | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_ESP (ETH_RSS_ESP | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV6_AH (ETH_RSS_AH | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define IAVF_RSS_TYPE_IPV4_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define IAVF_RSS_TYPE_IPV6_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV4_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV6_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_IPV4_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_IPV6_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
/**
* Supported pattern for hash.
{iavf_pattern_eth_vlan_ipv4_udp, IAVF_RSS_TYPE_VLAN_IPV4_UDP, &outer_ipv4_udp_tmplt},
{iavf_pattern_eth_vlan_ipv4_tcp, IAVF_RSS_TYPE_VLAN_IPV4_TCP, &outer_ipv4_tcp_tmplt},
{iavf_pattern_eth_vlan_ipv4_sctp, IAVF_RSS_TYPE_VLAN_IPV4_SCTP, &outer_ipv4_sctp_tmplt},
- {iavf_pattern_eth_ipv4_gtpu, ETH_RSS_IPV4, &outer_ipv4_udp_tmplt},
+ {iavf_pattern_eth_ipv4_gtpu, RTE_ETH_RSS_IPV4, &outer_ipv4_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv4, IAVF_RSS_TYPE_GTPU_IPV4, &inner_ipv4_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv4_udp, IAVF_RSS_TYPE_GTPU_IPV4_UDP, &inner_ipv4_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp, IAVF_RSS_TYPE_GTPU_IPV4_TCP, &inner_ipv4_tcp_tmplt},
{iavf_pattern_eth_ipv4_ah, IAVF_RSS_TYPE_IPV4_AH, &ipv4_ah_tmplt},
{iavf_pattern_eth_ipv4_l2tpv3, IAVF_RSS_TYPE_IPV4_L2TPV3, &ipv4_l2tpv3_tmplt},
{iavf_pattern_eth_ipv4_pfcp, IAVF_RSS_TYPE_IPV4_PFCP, &ipv4_pfcp_tmplt},
- {iavf_pattern_eth_ipv4_gtpc, ETH_RSS_IPV4, &ipv4_udp_gtpc_tmplt},
- {iavf_pattern_eth_ecpri, ETH_RSS_ECPRI, ð_ecpri_tmplt},
- {iavf_pattern_eth_ipv4_ecpri, ETH_RSS_ECPRI, &ipv4_ecpri_tmplt},
+ {iavf_pattern_eth_ipv4_gtpc, RTE_ETH_RSS_IPV4, &ipv4_udp_gtpc_tmplt},
+ {iavf_pattern_eth_ecpri, RTE_ETH_RSS_ECPRI, ð_ecpri_tmplt},
+ {iavf_pattern_eth_ipv4_ecpri, RTE_ETH_RSS_ECPRI, &ipv4_ecpri_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv4, IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
{iavf_pattern_eth_ipv6_gre_ipv4, IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv4_tcp, IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
{iavf_pattern_eth_vlan_ipv6_udp, IAVF_RSS_TYPE_VLAN_IPV6_UDP, &outer_ipv6_udp_tmplt},
{iavf_pattern_eth_vlan_ipv6_tcp, IAVF_RSS_TYPE_VLAN_IPV6_TCP, &outer_ipv6_tcp_tmplt},
{iavf_pattern_eth_vlan_ipv6_sctp, IAVF_RSS_TYPE_VLAN_IPV6_SCTP, &outer_ipv6_sctp_tmplt},
- {iavf_pattern_eth_ipv6_gtpu, ETH_RSS_IPV6, &outer_ipv6_udp_tmplt},
+ {iavf_pattern_eth_ipv6_gtpu, RTE_ETH_RSS_IPV6, &outer_ipv6_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv6, IAVF_RSS_TYPE_GTPU_IPV6, &inner_ipv6_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv6_udp, IAVF_RSS_TYPE_GTPU_IPV6_UDP, &inner_ipv6_udp_tmplt},
{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp, IAVF_RSS_TYPE_GTPU_IPV6_TCP, &inner_ipv6_tcp_tmplt},
{iavf_pattern_eth_ipv6_ah, IAVF_RSS_TYPE_IPV6_AH, &ipv6_ah_tmplt},
{iavf_pattern_eth_ipv6_l2tpv3, IAVF_RSS_TYPE_IPV6_L2TPV3, &ipv6_l2tpv3_tmplt},
{iavf_pattern_eth_ipv6_pfcp, IAVF_RSS_TYPE_IPV6_PFCP, &ipv6_pfcp_tmplt},
- {iavf_pattern_eth_ipv6_gtpc, ETH_RSS_IPV6, &ipv6_udp_gtpc_tmplt},
+ {iavf_pattern_eth_ipv6_gtpc, RTE_ETH_RSS_IPV6, &ipv6_udp_gtpc_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv6, IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
{iavf_pattern_eth_ipv6_gre_ipv6, IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
{iavf_pattern_eth_ipv4_gre_ipv6_tcp, IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
struct virtchnl_rss_cfg rss_cfg;
#define IAVF_RSS_HF_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
- if (rss_hf & ETH_RSS_IPV4) {
+ if (rss_hf & RTE_ETH_RSS_IPV4) {
rss_cfg.proto_hdrs = inner_ipv4_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_IPV6) {
+ if (rss_hf & RTE_ETH_RSS_IPV6) {
rss_cfg.proto_hdrs = inner_ipv6_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
iavf_add_del_rss_cfg(ad, &rss_cfg, add);
}
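/*
 * Illustrative sketch, not part of this patch: requesting at runtime the hash
 * types that the rss_hf checks above act on, using the renamed RTE_ETH_RSS_*
 * bits. The example_* name, port id and the chosen hash-type set are
 * assumptions for illustration only.
 */
#include <rte_ethdev.h>

static int
example_update_rss_hash(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the key already programmed */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}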
hdr = &proto_hdrs->proto_hdr[i];
switch (hdr->type) {
case VIRTCHNL_PROTO_HDR_ETH:
- if (!(rss_type & ETH_RSS_ETH))
+ if (!(rss_type & RTE_ETH_RSS_ETH))
hdr->field_selector = 0;
- else if (rss_type & ETH_RSS_L2_SRC_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
REFINE_PROTO_FLD(DEL, ETH_DST);
- else if (rss_type & ETH_RSS_L2_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
REFINE_PROTO_FLD(DEL, ETH_SRC);
break;
case VIRTCHNL_PROTO_HDR_IPV4:
if (rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV4) {
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
- } else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
REFINE_PROTO_FLD(DEL, IPV4_DST);
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
REFINE_PROTO_FLD(DEL, IPV4_SRC);
} else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY)) {
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY)) {
REFINE_PROTO_FLD(DEL, IPV4_DST);
REFINE_PROTO_FLD(DEL, IPV4_SRC);
}
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_IPV4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
if (rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV4)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV4)
REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_IPV4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
REFINE_PROTO_FLD(ADD, IPV4_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_IPV6:
if (rss_type &
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
REFINE_PROTO_FLD(DEL, IPV6_DST);
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
REFINE_PROTO_FLD(DEL, IPV6_SRC);
} else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY)) {
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY)) {
REFINE_PROTO_FLD(DEL, IPV6_DST);
REFINE_PROTO_FLD(DEL, IPV6_SRC);
}
}
break;
case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
- if (rss_type & ETH_RSS_FRAG_IPV6)
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
else
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_UDP:
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
REFINE_PROTO_FLD(DEL, UDP_DST_PORT);
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
REFINE_PROTO_FLD(DEL, UDP_SRC_PORT);
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
hdr->field_selector = 0;
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
REFINE_PROTO_FLD(ADD, UDP_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_TCP:
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
REFINE_PROTO_FLD(DEL, TCP_DST_PORT);
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
REFINE_PROTO_FLD(DEL, TCP_SRC_PORT);
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
hdr->field_selector = 0;
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
REFINE_PROTO_FLD(ADD, TCP_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_SCTP:
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
REFINE_PROTO_FLD(DEL, SCTP_DST_PORT);
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
REFINE_PROTO_FLD(DEL, SCTP_SRC_PORT);
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
hdr->field_selector = 0;
} else {
hdr->field_selector = 0;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
REFINE_PROTO_FLD(ADD, SCTP_CHKSUM);
break;
case VIRTCHNL_PROTO_HDR_S_VLAN:
- if (!(rss_type & ETH_RSS_S_VLAN))
+ if (!(rss_type & RTE_ETH_RSS_S_VLAN))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_C_VLAN:
- if (!(rss_type & ETH_RSS_C_VLAN))
+ if (!(rss_type & RTE_ETH_RSS_C_VLAN))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (!(rss_type & ETH_RSS_L2TPV3))
+ if (!(rss_type & RTE_ETH_RSS_L2TPV3))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_ESP:
- if (!(rss_type & ETH_RSS_ESP))
+ if (!(rss_type & RTE_ETH_RSS_ESP))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_AH:
- if (!(rss_type & ETH_RSS_AH))
+ if (!(rss_type & RTE_ETH_RSS_AH))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_PFCP:
- if (!(rss_type & ETH_RSS_PFCP))
+ if (!(rss_type & RTE_ETH_RSS_PFCP))
hdr->field_selector = 0;
break;
case VIRTCHNL_PROTO_HDR_ECPRI:
- if (!(rss_type & ETH_RSS_ECPRI))
+ if (!(rss_type & RTE_ETH_RSS_ECPRI))
hdr->field_selector = 0;
break;
default:
struct virtchnl_proto_hdr *hdr;
int i;
- if (!(rss_type & ETH_RSS_GTPU))
+ if (!(rss_type & RTE_ETH_RSS_GTPU))
return;
for (i = 0; i < proto_hdrs->count; i++) {
}
static uint64_t invalid_rss_comb[] = {
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
RTE_ETH_RSS_L3_PRE32 | RTE_ETH_RSS_L3_PRE40 |
RTE_ETH_RSS_L3_PRE48 | RTE_ETH_RSS_L3_PRE56 |
RTE_ETH_RSS_L3_PRE96
uint64_t type;
};
-#define VALID_RSS_IPV4_L4 (ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4 (RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
-#define VALID_RSS_IPV6_L4 (ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4 (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
-#define VALID_RSS_IPV4 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
VALID_RSS_IPV6_L4)
#define VALID_RSS_L3 (VALID_RSS_IPV4 | VALID_RSS_IPV6)
#define VALID_RSS_L4 (VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
-#define VALID_RSS_ATTR (ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY | \
+#define VALID_RSS_ATTR (RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY | \
+ RTE_ETH_RSS_L2_SRC_ONLY | \
+ RTE_ETH_RSS_L2_DST_ONLY | \
RTE_ETH_RSS_L3_PRE64)
#define INVALID_RSS_ATTR (RTE_ETH_RSS_L3_PRE32 | \
RTE_ETH_RSS_L3_PRE96)
static struct rss_attr_type rss_attr_to_valid_type[] = {
- {ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY, ETH_RSS_ETH},
- {ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
- {ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
+ {RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY, RTE_ETH_RSS_ETH},
+ {RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
+ {RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
/* current ipv6 prefix only supports prefix 64 bits*/
{RTE_ETH_RSS_L3_PRE64, VALID_RSS_IPV6},
{INVALID_RSS_ATTR, 0}
* hash function.
*/
if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+ if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
return true;
if (!(rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
- ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
return true;
}
rxq->vsi = vsi;
rxq->offloads = offloads;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
#define IAVF_VPMD_TX_MAX_FREE_BUF 64
#define IAVF_TX_NO_VECTOR_FLAGS ( \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_TCP_TSO)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define IAVF_TX_VECTOR_OFFLOAD ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define IAVF_RX_VECTOR_OFFLOAD ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_VLAN | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define IAVF_VECTOR_PATH 0
#define IAVF_VECTOR_OFFLOAD_PATH 1
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
(_mm256_castsi128_si256(raw_desc_bh0),
raw_desc_bh1, 1);
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/**
* to shift the 32b RSS hash value to the
* highest 32b of each 128b before mask
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
(_mm256_castsi128_si256(raw_desc_bh0),
raw_desc_bh1, 1);
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/**
* to shift the 32b RSS hash value to the
* highest 32b of each 128b before mask
txep = (void *)txq->sw_ring;
txep += txq->next_dd - (n - 1);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
_mm_load_si128
PMD_DRV_LOG(DEBUG, "RSS is not supported");
return -ENOTSUP;
}
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
/* set all lut items to default queue */
memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
}
rxq->max_pkt_len = max_pkt_len;
- if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
return ret;
}
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
}
ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
ad->pf.adapter_stopped = 1;
hw->tm_conf.committed = false;
ad->rx_bulk_alloc_allowed = true;
ad->tx_simple_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
return 0;
}
dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
*/
switch (hw->link_speed) {
case 10:
- new_link.link_speed = ETH_SPEED_NUM_10M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case 100:
- new_link.link_speed = ETH_SPEED_NUM_100M;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case 1000:
- new_link.link_speed = ETH_SPEED_NUM_1G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case 10000:
- new_link.link_speed = ETH_SPEED_NUM_10G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 20000:
- new_link.link_speed = ETH_SPEED_NUM_20G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case 25000:
- new_link.link_speed = ETH_SPEED_NUM_25G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- new_link.link_speed = ETH_SPEED_NUM_40G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- new_link.link_speed = ETH_SPEED_NUM_50G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- new_link.link_speed = ETH_SPEED_NUM_100G;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- new_link.link_speed = ETH_SPEED_NUM_NONE;
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = hw->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
return rte_eth_linkstatus_set(dev, &new_link);
}
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_ECPRI:
+ case RTE_ETH_TUNNEL_TYPE_ECPRI:
ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
udp_tunnel->udp_port);
break;
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_ECPRI:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_ECPRI:
ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
break;
default:
static int
ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
static int
ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
return -ENOTSUP;
/* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
bool enable = !!(dev_conf->rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
if (enable && repr->outer_vlan_info.port_vlan_ena) {
PMD_DRV_LOG(ERR,
if (!ice_dcf_vlan_offload_ena(repr))
return -ENOTSUP;
- if (vlan_type != ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
PMD_DRV_LOG(ERR,
"Can accelerate only outer VLAN in QinQ\n");
return -EINVAL;
if (repr->outer_vlan_info.stripping_ena) {
err = ice_dcf_vf_repr_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK);
+ RTE_ETH_VLAN_STRIP_MASK);
if (err) {
PMD_DRV_LOG(ERR,
"Failed to reset VLAN stripping : %d\n",
int err;
err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
- ETH_VLAN_STRIP_MASK);
+ RTE_ETH_VLAN_STRIP_MASK);
if (err) {
PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
return err;
TAILQ_INIT(&vsi->mac_list);
TAILQ_INIT(&vsi->vlan_list);
- /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */
+ /* Be sync with RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
- ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+ RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
hw->func_caps.common_cap.rss_table_size;
pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
int ret;
#define ICE_RSS_HF_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
if (ret)
cfg.symm = 0;
cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
/* Configure RSS for IPv4 with src/dst addr as input set */
- if (rss_hf & ETH_RSS_IPV4) {
+ if (rss_hf & RTE_ETH_RSS_IPV4) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV4;
ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
}
/* Configure RSS for IPv6 with src/dst addr as input set */
- if (rss_hf & ETH_RSS_IPV6) {
+ if (rss_hf & RTE_ETH_RSS_IPV6) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV6;
ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
}
/* Configure RSS for udp4 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV4;
}
/* Configure RSS for udp6 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV6;
}
/* Configure RSS for tcp4 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV4;
}
/* Configure RSS for tcp6 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV6;
}
/* Configure RSS for sctp4 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_SCTP_IPV4;
}
/* Configure RSS for sctp6 with src/dst addr and port as input set */
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_SCTP_IPV6;
__func__, ret);
}
- if (rss_hf & ETH_RSS_IPV4) {
+ if (rss_hf & RTE_ETH_RSS_IPV4) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV4;
__func__, ret);
}
- if (rss_hf & ETH_RSS_IPV6) {
+ if (rss_hf & RTE_ETH_RSS_IPV6) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_FLOW_HASH_IPV6;
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV4;
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_UDP_IPV6;
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV4;
__func__, ret);
}
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
cfg.hash_flds = ICE_HASH_TCP_IPV6;
ad->rx_bulk_alloc_allowed = true;
ad->tx_simple_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (dev->data->nb_rx_queues) {
ret = ice_init_rss(pf);
ice_set_rx_function(dev);
ice_set_tx_function(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = ice_vlan_offload_set(dev, mask);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->flow_type_rss_offloads = 0;
if (!is_safe_mode) {
dev_info->rx_offload_capa |=
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_TIMESTAMP;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa |=
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
}
dev_info->rx_queue_offload_capa = 0;
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->reta_size = pf->hash_lut_size;
dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
.nb_align = ICE_ALIGN_RING_DESC,
};
- dev_info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_5G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_25G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_5G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_20G |
+ RTE_ETH_LINK_SPEED_25G;
phy_type_low = hw->port_info->phy.phy_type_low;
phy_type_high = hw->port_info->phy.phy_type_high;
if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
- dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
- dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
status = ice_aq_get_link_info(hw->port_info, enable_lse,
&link_status, NULL);
if (status != ICE_SUCCESS) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
goto out;
}
goto out;
/* Full-duplex operation at all supported speeds */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
/* Parse the link status */
switch (link_status.link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case ICE_AQ_LINK_SPEED_100MB:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case ICE_AQ_LINK_SPEED_1000MB:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case ICE_AQ_LINK_SPEED_2500MB:
- link.link_speed = ETH_SPEED_NUM_2_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case ICE_AQ_LINK_SPEED_5GB:
- link.link_speed = ETH_SPEED_NUM_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case ICE_AQ_LINK_SPEED_10GB:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case ICE_AQ_LINK_SPEED_20GB:
- link.link_speed = ETH_SPEED_NUM_20G;
+ link.link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case ICE_AQ_LINK_SPEED_25GB:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case ICE_AQ_LINK_SPEED_40GB:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case ICE_AQ_LINK_SPEED_50GB:
- link.link_speed = ETH_SPEED_NUM_50G;
+ link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case ICE_AQ_LINK_SPEED_100GB:
- link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
case ICE_AQ_LINK_SPEED_UNKNOWN:
PMD_DRV_LOG(ERR, "Unknown link speed");
- link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
default:
PMD_DRV_LOG(ERR, "None link speed");
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
out:
ice_atomic_write_link_status(dev, &link);
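/*
 * Illustrative sketch, not part of this patch: reading back the link state
 * that the update routine above publishes, using the renamed RTE_ETH_LINK_*
 * constants. The example_* name and port id are assumptions for illustration
 * only.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full" : "half");
	else
		printf("port %u: down\n", port_id);
}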
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ice_vsi_config_vlan_filter(vsi, true);
else
ice_vsi_config_vlan_filter(vsi, false);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
ice_vsi_config_vlan_stripping(vsi, true);
else
ice_vsi_config_vlan_stripping(vsi, false);
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = lut[i];
}
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
break;
default:
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
break;
default:
int ret;
if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_TIMESTAMP)) {
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
return -1;
}
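/*
 * Illustrative sketch, not part of this patch: the Rx timestamp offload that
 * the check above requires must be requested before rte_eth_dev_start(). The
 * example_* name, port id and single-queue configuration are assumptions for
 * illustration only.
 */
#include <rte_ethdev.h>

static int
example_enable_rx_timestamp(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}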
ICE_FLAG_VF_MAC_BY_PF)
#define ICE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L2_PAYLOAD)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L2_PAYLOAD)
/**
* The overhead from MTU to max frame size.
#define ICE_IPV4_PROT BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)
#define ICE_IPV6_PROT BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)
-#define VALID_RSS_IPV4_L4 (ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+#define VALID_RSS_IPV4_L4 (RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
-#define VALID_RSS_IPV6_L4 (ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+#define VALID_RSS_IPV6_L4 (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
-#define VALID_RSS_IPV4 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+#define VALID_RSS_IPV4 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+#define VALID_RSS_IPV6 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
VALID_RSS_IPV6_L4)
#define VALID_RSS_L3 (VALID_RSS_IPV4 | VALID_RSS_IPV6)
#define VALID_RSS_L4 (VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
-#define VALID_RSS_ATTR (ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY | \
+#define VALID_RSS_ATTR (RTE_ETH_RSS_L3_SRC_ONLY | \
+ RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | \
+ RTE_ETH_RSS_L4_DST_ONLY | \
+ RTE_ETH_RSS_L2_SRC_ONLY | \
+ RTE_ETH_RSS_L2_DST_ONLY | \
RTE_ETH_RSS_L3_PRE32 | \
RTE_ETH_RSS_L3_PRE48 | \
RTE_ETH_RSS_L3_PRE64)
};
/* IPv4 */
-#define ICE_RSS_TYPE_ETH_IPV4 (ETH_RSS_ETH | ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_IPV4_CHKSUM)
+#define ICE_RSS_TYPE_ETH_IPV4 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_IPV4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV4_UDP (ICE_RSS_TYPE_ETH_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV4_TCP (ICE_RSS_TYPE_ETH_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV4_SCTP (ICE_RSS_TYPE_ETH_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV4 ETH_RSS_IPV4
-#define ICE_RSS_TYPE_IPV4_UDP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP)
-#define ICE_RSS_TYPE_IPV4_TCP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP)
-#define ICE_RSS_TYPE_IPV4_SCTP (ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV4 RTE_ETH_RSS_IPV4
+#define ICE_RSS_TYPE_IPV4_UDP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+#define ICE_RSS_TYPE_IPV4_TCP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+#define ICE_RSS_TYPE_IPV4_SCTP (RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
/* IPv6 */
-#define ICE_RSS_TYPE_ETH_IPV6 (ETH_RSS_ETH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_ETH_IPV6_FRAG (ETH_RSS_ETH | ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6 (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_ETH_IPV6_FRAG (RTE_ETH_RSS_ETH | RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6)
#define ICE_RSS_TYPE_ETH_IPV6_UDP (ICE_RSS_TYPE_ETH_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV6_TCP (ICE_RSS_TYPE_ETH_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_L4_CHKSUM)
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_L4_CHKSUM)
#define ICE_RSS_TYPE_ETH_IPV6_SCTP (ICE_RSS_TYPE_ETH_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_L4_CHKSUM)
-#define ICE_RSS_TYPE_IPV6 ETH_RSS_IPV6
-#define ICE_RSS_TYPE_IPV6_UDP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP)
-#define ICE_RSS_TYPE_IPV6_TCP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP)
-#define ICE_RSS_TYPE_IPV6_SCTP (ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_L4_CHKSUM)
+#define ICE_RSS_TYPE_IPV6 RTE_ETH_RSS_IPV6
+#define ICE_RSS_TYPE_IPV6_UDP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ICE_RSS_TYPE_IPV6_TCP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ICE_RSS_TYPE_IPV6_SCTP (RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
/* VLAN IPV4 */
#define ICE_RSS_TYPE_VLAN_IPV4 (ICE_RSS_TYPE_IPV4 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
- ETH_RSS_FRAG_IPV4)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+ RTE_ETH_RSS_FRAG_IPV4)
#define ICE_RSS_TYPE_VLAN_IPV4_UDP (ICE_RSS_TYPE_IPV4_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV4_TCP (ICE_RSS_TYPE_IPV4_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV4_SCTP (ICE_RSS_TYPE_IPV4_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* VLAN IPv6 */
#define ICE_RSS_TYPE_VLAN_IPV6 (ICE_RSS_TYPE_IPV6 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV6_FRAG (ICE_RSS_TYPE_IPV6 | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN | \
- ETH_RSS_FRAG_IPV6)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN | \
+ RTE_ETH_RSS_FRAG_IPV6)
#define ICE_RSS_TYPE_VLAN_IPV6_UDP (ICE_RSS_TYPE_IPV6_UDP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV6_TCP (ICE_RSS_TYPE_IPV6_TCP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
#define ICE_RSS_TYPE_VLAN_IPV6_SCTP (ICE_RSS_TYPE_IPV6_SCTP | \
- ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+ RTE_ETH_RSS_S_VLAN | RTE_ETH_RSS_C_VLAN)
/* GTPU IPv4 */
#define ICE_RSS_TYPE_GTPU_IPV4 (ICE_RSS_TYPE_IPV4 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV4_UDP (ICE_RSS_TYPE_IPV4_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV4_TCP (ICE_RSS_TYPE_IPV4_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* GTPU IPv6 */
#define ICE_RSS_TYPE_GTPU_IPV6 (ICE_RSS_TYPE_IPV6 | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV6_UDP (ICE_RSS_TYPE_IPV6_UDP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
#define ICE_RSS_TYPE_GTPU_IPV6_TCP (ICE_RSS_TYPE_IPV6_TCP | \
- ETH_RSS_GTPU)
+ RTE_ETH_RSS_GTPU)
/* PPPOE */
-#define ICE_RSS_TYPE_PPPOE (ETH_RSS_ETH | ETH_RSS_PPPOE)
+#define ICE_RSS_TYPE_PPPOE (RTE_ETH_RSS_ETH | RTE_ETH_RSS_PPPOE)
/* PPPOE IPv4 */
#define ICE_RSS_TYPE_PPPOE_IPV4 (ICE_RSS_TYPE_IPV4 | \
ICE_RSS_TYPE_PPPOE)
/* ESP, AH, L2TPV3 and PFCP */
-#define ICE_RSS_TYPE_IPV4_ESP (ETH_RSS_ESP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_ESP (ETH_RSS_ESP | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_AH (ETH_RSS_AH | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_AH (ETH_RSS_AH | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_L2TPV3 (ETH_RSS_L2TPV3 | ETH_RSS_IPV6)
-#define ICE_RSS_TYPE_IPV4_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV4)
-#define ICE_RSS_TYPE_IPV6_PFCP (ETH_RSS_PFCP | ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_ESP (RTE_ETH_RSS_ESP | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_AH (RTE_ETH_RSS_AH | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_L2TPV3 (RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_IPV6)
+#define ICE_RSS_TYPE_IPV4_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV4)
+#define ICE_RSS_TYPE_IPV6_PFCP (RTE_ETH_RSS_PFCP | RTE_ETH_RSS_IPV6)
/* MAC */
-#define ICE_RSS_TYPE_ETH ETH_RSS_ETH
+#define ICE_RSS_TYPE_ETH RTE_ETH_RSS_ETH
/**
* Supported pattern for hash.
uint64_t *hash_flds = &hash_cfg->hash_flds;
if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH) {
- if (!(rss_type & ETH_RSS_ETH))
+ if (!(rss_type & RTE_ETH_RSS_ETH))
*hash_flds &= ~ICE_FLOW_HASH_ETH;
- if (rss_type & ETH_RSS_L2_SRC_ONLY)
+ if (rss_type & RTE_ETH_RSS_L2_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA));
- else if (rss_type & ETH_RSS_L2_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L2_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA));
*addl_hdrs &= ~ICE_FLOW_SEG_HDR_ETH;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
- if (rss_type & ETH_RSS_ETH)
+ if (rss_type & RTE_ETH_RSS_ETH)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_VLAN) {
- if (rss_type & ETH_RSS_C_VLAN)
+ if (rss_type & RTE_ETH_RSS_C_VLAN)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN);
- else if (rss_type & ETH_RSS_S_VLAN)
+ else if (rss_type & RTE_ETH_RSS_S_VLAN)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
- if (!(rss_type & ETH_RSS_PPPOE))
+ if (!(rss_type & RTE_ETH_RSS_PPPOE))
*hash_flds &= ~ICE_FLOW_HASH_PPPOE_SESS_ID;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
if (rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV4) {
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV4) {
*addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
*addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
*hash_flds |=
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
}
- if (rss_type & ETH_RSS_L3_SRC_ONLY)
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA));
- else if (rss_type & ETH_RSS_L3_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA));
else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY))
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_IPV4;
} else {
*hash_flds &= ~ICE_FLOW_HASH_IPV4;
}
- if (rss_type & ETH_RSS_IPV4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_IPV4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
if (rss_type &
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_FRAG_IPV6)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_FRAG_IPV6)
*hash_flds |=
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
- if (rss_type & ETH_RSS_L3_SRC_ONLY)
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
- else if (rss_type & ETH_RSS_L3_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
else if (rss_type &
- (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY))
+ (RTE_ETH_RSS_L4_SRC_ONLY |
+ RTE_ETH_RSS_L4_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_IPV6;
} else {
*hash_flds &= ~ICE_FLOW_HASH_IPV6;
}
if (rss_type & RTE_ETH_RSS_L3_PRE32) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA));
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA));
} else {
}
}
if (rss_type & RTE_ETH_RSS_L3_PRE48) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA));
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA));
} else {
}
}
if (rss_type & RTE_ETH_RSS_L3_PRE64) {
- if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+ if (rss_type & RTE_ETH_RSS_L3_SRC_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA));
- } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
+ } else if (rss_type & RTE_ETH_RSS_L3_DST_ONLY) {
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA));
*hash_flds |= (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA));
} else {
if (*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) {
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT));
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT));
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
} else {
*hash_flds &= ~ICE_FLOW_HASH_UDP_PORT;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_TCP) {
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT));
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT));
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
} else {
*hash_flds &= ~ICE_FLOW_HASH_TCP_PORT;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_SCTP) {
if (rss_type &
- (ETH_RSS_NONFRAG_IPV4_SCTP |
- ETH_RSS_NONFRAG_IPV6_SCTP)) {
- if (rss_type & ETH_RSS_L4_SRC_ONLY)
+ (RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)) {
+ if (rss_type & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT));
- else if (rss_type & ETH_RSS_L4_DST_ONLY)
+ else if (rss_type & RTE_ETH_RSS_L4_DST_ONLY)
*hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT));
else if (rss_type &
- (ETH_RSS_L3_SRC_ONLY |
- ETH_RSS_L3_DST_ONLY))
+ (RTE_ETH_RSS_L3_SRC_ONLY |
+ RTE_ETH_RSS_L3_DST_ONLY))
*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
} else {
*hash_flds &= ~ICE_FLOW_HASH_SCTP_PORT;
}
- if (rss_type & ETH_RSS_L4_CHKSUM)
+ if (rss_type & RTE_ETH_RSS_L4_CHKSUM)
*hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM);
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
- if (!(rss_type & ETH_RSS_L2TPV3))
+ if (!(rss_type & RTE_ETH_RSS_L2TPV3))
*hash_flds &= ~ICE_FLOW_HASH_L2TPV3_SESS_ID;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP) {
- if (!(rss_type & ETH_RSS_ESP))
+ if (!(rss_type & RTE_ETH_RSS_ESP))
*hash_flds &= ~ICE_FLOW_HASH_ESP_SPI;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_AH) {
- if (!(rss_type & ETH_RSS_AH))
+ if (!(rss_type & RTE_ETH_RSS_AH))
*hash_flds &= ~ICE_FLOW_HASH_AH_SPI;
}
if (*addl_hdrs & ICE_FLOW_SEG_HDR_PFCP_SESSION) {
- if (!(rss_type & ETH_RSS_PFCP))
+ if (!(rss_type & RTE_ETH_RSS_PFCP))
*hash_flds &= ~ICE_FLOW_HASH_PFCP_SEID;
}
}
uint64_t *hash_flds = &hash_cfg->hash_flds;
/* update hash field for gtpu eh/gtpu dwn/gtpu up. */
- if (!(rss_type & ETH_RSS_GTPU))
+ if (!(rss_type & RTE_ETH_RSS_GTPU))
return;
if (*addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
}
static uint64_t invalid_rss_comb[] = {
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
- ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP,
- ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP,
RTE_ETH_RSS_L3_PRE40 |
RTE_ETH_RSS_L3_PRE56 |
RTE_ETH_RSS_L3_PRE96
};
static struct rss_attr_type rss_attr_to_valid_type[] = {
- {ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY, ETH_RSS_ETH},
- {ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
- {ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
+ {RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY, RTE_ETH_RSS_ETH},
+ {RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
+ {RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
/* current ipv6 prefix only supports prefix 64 bits*/
{RTE_ETH_RSS_L3_PRE32, VALID_RSS_IPV6},
{RTE_ETH_RSS_L3_PRE48, VALID_RSS_IPV6},
* hash function.
*/
if (rss_func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- if (rss_type & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY))
+ if (rss_type & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY |
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY))
return true;
if (!(rss_type &
- (ETH_RSS_IPV4 | ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV4 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_SCTP)))
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
return true;
}
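The check above rejects symmetric Toeplitz combined with the src/dst-only attributes. For reference, a minimal application-side rte_flow sketch (hypothetical helper; port_id and the queue list are caller-supplied) that requests symmetric RSS in a form this validation accepts:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Request symmetric Toeplitz hashing of non-fragmented IPv4/TCP
 * traffic onto the given queue list; no L3/L4 SRC/DST_ONLY bits,
 * which the validation above rejects for this hash function. */
static struct rte_flow *
add_sym_rss_rule(uint16_t port_id, const uint16_t *queues,
		 uint32_t nb_queues)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
		.types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		.queue_num = nb_queues,
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}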
return -EINVAL;
}
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
/* Register mbuf field and flag for Rx timestamp */
err = rte_mbuf_dyn_rx_timestamp_register(
&ice_timestamp_dynfield_offset,
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
regval |= QRXFLXP_CNTXT_TS_M;
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
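The timestamp offload checked here pairs with the mbuf dynamic field the PMD registers at queue start. A small application-side sketch (hypothetical helpers; the static variable names are illustrative) of registering and reading that field:

#include <rte_ethdev.h>
#include <rte_mbuf_dyn.h>

/* Register the dynamic Rx timestamp field/flag once, then read the
 * timestamp from packets received on a port configured with
 * RTE_ETH_RX_OFFLOAD_TIMESTAMP. */
static int hwts_dynfield_offset = -1;
static uint64_t hwts_dynflag;

static int
setup_rx_timestamp(void)
{
	return rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset,
						  &hwts_dynflag);
}

static uint64_t
pkt_rx_timestamp(struct rte_mbuf *m)
{
	if ((m->ol_flags & hwts_dynflag) == 0)
		return 0;
	return *RTE_MBUF_DYNFIELD(m, hwts_dynfield_offset,
				  rte_mbuf_timestamp_t *);
}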
rxq->reg_idx = vsi->base_queue + queue_idx;
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
if (ice_timestamp_dynflag > 0) {
rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
if (ice_timestamp_dynflag > 0) {
rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
if (ice_timestamp_dynflag > 0) {
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
/* Use a simple Tx queue if possible (only fast free is allowed) */
ad->tx_simple_allowed =
(txq->offloads ==
- (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
if (ad->tx_simple_allowed)
* will cause performance drop to get into this context.
*/
if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
* will cause performance drop to get into this context.
*/
if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
txep = (void *)txq->sw_ring;
txep += txq->tx_next_dd - (n - 1);
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
void **cache_objs;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
}
#define ICE_TX_NO_VECTOR_FLAGS ( \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define ICE_TX_VECTOR_OFFLOAD ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define ICE_RX_VECTOR_OFFLOAD ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_VLAN | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define ICE_VECTOR_PATH 0
#define ICE_VECTOR_OFFLOAD_PATH 1
if (rxq->proto_xtr != PROTO_XTR_NONE)
return -1;
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
return -1;
if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
* will cause performance drop to get into this context.
*/
if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ RTE_ETH_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
_mm_load_si128
return -EINVAL;
}
- if (rx_mq_mode != ETH_MQ_RX_NONE &&
- rx_mq_mode != ETH_MQ_RX_RSS) {
+ if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
/* RSS together with VMDq not supported*/
PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
rx_mq_mode);
/* To not break software that sets an invalid mode, only display
* a warning if an invalid mode is used.

*/
- if (tx_mq_mode != ETH_MQ_TX_NONE)
+ if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
PMD_INIT_LOG(WARNING,
"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
tx_mq_mode);
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
ret = igc_check_mq_mode(dev);
if (ret != 0)
uint16_t duplex, speed;
hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
link.link_duplex = (duplex == FULL_DUPLEX) ?
- ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
+ RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
link.link_speed = speed;
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
if (speed == SPEED_2500) {
uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
}
} else {
link.link_speed = 0;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
}
return rte_eth_linkstatus_set(dev, &link);
" Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id,
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
else
PMD_DRV_LOG(INFO, " Port %d: Link Down",
/* VLAN Offload Settings */
eth_igc_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
/* Setup link speed and duplex */
speeds = &dev->data->dev_conf.link_speeds;
- if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
hw->mac.autoneg = 1;
} else {
int num_speeds = 0;
- if (*speeds & ETH_LINK_SPEED_FIXED) {
+ if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_DRV_LOG(ERR,
"Force speed mode currently not supported");
igc_dev_clear_queues(dev);
hw->phy.autoneg_advertised = 0;
hw->mac.autoneg = 1;
- if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
+ if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
num_speeds = -1;
goto error_invalid_config;
}
- if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_10M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_10M) {
hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_100M) {
+ if (*speeds & RTE_ETH_LINK_SPEED_100M) {
hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_1G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_1G) {
hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
num_speeds++;
}
- if (*speeds & ETH_LINK_SPEED_2_5G) {
+ if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
num_speeds++;
}
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
- dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
dev_info->max_vmdq_pools = 0;
dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
- dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
rx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
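For the renamed flow-control modes, a minimal application-side sketch (hypothetical helper; port_id assumed valid and started) that keeps the current watermarks and only changes the mode:

#include <string.h>
#include <rte_ethdev.h>

/* Switch a port to full (rx+tx) pause using the renamed
 * RTE_ETH_FC_* values, preserving the existing watermarks. */
static int
set_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}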
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
hw->fc.requested_mode = igc_fc_none;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
hw->fc.requested_mode = igc_fc_rx_pause;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
hw->fc.requested_mode = igc_fc_tx_pause;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
hw->fc.requested_mode = igc_fc_full;
break;
default:
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
uint16_t i;
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR,
"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
- reta_size, ETH_RSS_RETA_SIZE_128);
+ reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
- RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+ RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
/* set redirection table */
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
union igc_rss_reta_reg reta, reg;
uint16_t idx, shift;
uint8_t j, mask;
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGC_RSS_RDT_REG_SIZE_MASK);
/* if no need to update the register */
if (!mask ||
- shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
continue;
/* check mask whether need to read the register value first */
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
uint16_t i;
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR,
"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
- reta_size, ETH_RSS_RETA_SIZE_128);
+ reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
- RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+ RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
/* read redirection table */
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
union igc_rss_reta_reg reta;
uint16_t idx, shift;
uint8_t j, mask;
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IGC_RSS_RDT_REG_SIZE_MASK);
/* if no need to read register */
if (!mask ||
- shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
continue;
/* read register and get the queue index */
rss_hf = 0;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
- rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf |= rss_hf;
return 0;
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
igc_vlan_hw_strip_enable(dev);
else
igc_vlan_hw_strip_disable(dev);
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
igc_vlan_hw_filter_enable(dev);
else
igc_vlan_hw_filter_disable(dev);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return igc_vlan_hw_extend_enable(dev);
else
return igc_vlan_hw_extend_disable(dev);
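A short application-side sketch of the renamed VLAN offload flags (hypothetical helper; the per-bit semantics are unchanged, only the RTE_ETH_ prefix is new):

#include <rte_ethdev.h>

/* Turn on VLAN stripping and filtering on top of whatever is
 * already enabled on the port. */
static int
enable_vlan_strip_filter(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;

	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}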
uint32_t reg_val;
/* only outer TPID of double VLAN can be configured*/
- if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
reg_val = IGC_READ_REG(hw, IGC_VET);
reg_val = (reg_val & (~IGC_VET_EXT)) |
((uint32_t)tpid << IGC_VET_EXT_SHIFT);
#define IGC_TX_MAX_MTU_SEG UINT8_MAX
#define IGC_RX_OFFLOAD_ALL ( \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_VLAN_EXTEND | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_KEEP_CRC | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define IGC_TX_OFFLOAD_ALL ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_UDP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_UDP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define IGC_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
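With the Tx offload flags renamed the same way, a small sketch (hypothetical helper; conf points at the rte_eth_conf later passed to rte_eth_dev_configure) of gating requested offloads on what the PMD reports:

#include <rte_ethdev.h>

/* Request checksum and TSO Tx offloads only if the PMD advertises
 * them in tx_offload_capa. */
static int
pick_tx_offloads(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	uint64_t wanted = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			  RTE_ETH_TX_OFFLOAD_TCP_TSO;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;

	conf->txmode.offloads |= wanted & dev_info.tx_offload_capa;
	return 0;
}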
#define IGC_MAX_ETQF_FILTERS 3 /* etqf(3) is used for 1588 */
#define IGC_ETQF_FILTER_1588 3
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
- uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
};
/** Offload features */
/**< Start context position for transmit queue. */
struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
/**< Hardware context history.*/
- uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
};
static inline uint64_t
/* Set configured hashing protocols in MRQC register */
rss_hf = rss_conf->rss_hf;
mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
- if (rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
- if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
}
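On the application side, the same RTE_ETH_RSS_* bits select the hash protocols; a minimal sketch (hypothetical helper; port already configured, PMD keeps its current key):

#include <rte_ethdev.h>

/* Hash IPv4/IPv6 TCP and UDP flows; rss_key left NULL so the PMD
 * keeps its current key. */
static int
update_rss_types(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,
		.rss_hf = RTE_ETH_RSS_IPV4 |
			  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_IPV6 |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}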
}
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
igc_rss_configure(dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/*
* configure RSS register for following,
* then disable the RSS logic
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+ rxq->crc_len = (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
RTE_ETHER_CRC_LEN : 0;
bus_addr = rxq->rx_ring_phys_addr;
IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
}
- if (offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
if (dev->data->scattered_rx) {
rxcsum |= IGC_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
- if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rxcsum |= IGC_RXCSUM_IPOFL;
else
rxcsum &= ~IGC_RXCSUM_IPOFL;
if (offloads &
- (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
rxcsum |= IGC_RXCSUM_TUOFL;
- offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
+ offloads |= RTE_ETH_RX_OFFLOAD_SCTP_CKSUM;
} else {
rxcsum &= ~IGC_RXCSUM_TUOFL;
}
- if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
+ if (offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)
rxcsum |= IGC_RXCSUM_CRCOFL;
else
rxcsum &= ~IGC_RXCSUM_CRCOFL;
IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
else
rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
dvmolr |= IGC_DVMOLR_STRVLAN;
else
dvmolr &= ~IGC_DVMOLR_STRVLAN;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
dvmolr &= ~IGC_DVMOLR_STRCRC;
else
dvmolr |= IGC_DVMOLR_STRCRC;
reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
if (on) {
reg_val |= IGC_DVMOLR_STRVLAN;
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);
memset(&link, 0, sizeof(link));
if (adapter->idev.port_info->config.an_enable) {
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
}
if (!adapter->link_up ||
!(lif->state & IONIC_LIF_F_UP)) {
/* Interface is down */
- link.link_status = ETH_LINK_DOWN;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
} else {
/* Interface is up */
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (adapter->link_speed) {
case 10000:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case 25000:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case 40000:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case 50000:
- link.link_speed = ETH_SPEED_NUM_50G;
+ link.link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case 100000:
- link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
}
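Reading the link back uses the matching RTE_ETH_LINK_* and RTE_ETH_SPEED_NUM_* values; a small sketch (hypothetical helper; port_id assumed started):

#include <stdio.h>
#include <rte_ethdev.h>

/* Report the current link without waiting for negotiation. */
static void
show_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == RTE_ETH_LINK_DOWN) {
		printf("port %u: link down\n", port_id);
		return;
	}

	printf("port %u: %u Mbps, %s-duplex\n", port_id,
	       link.link_speed,
	       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
	       "full" : "half");
}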
dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
dev_info->speed_capa =
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
/*
* Per-queue capabilities
* RTE does not support disabling a feature on a queue if it is
* enabled globally on the device. Thus the driver does not advertise
- * capabilities like DEV_TX_OFFLOAD_IPV4_CKSUM as per-queue even
+ * capabilities like RTE_ETH_TX_OFFLOAD_IPV4_CKSUM as per-queue even
* though the driver would be otherwise capable of disabling it on
* a per-queue basis.
*/
*/
dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
0;
dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
0;
dev_info->rx_desc_lim = rx_desc_lim;
fc_conf->autoneg = 0;
if (idev->port_info->config.pause_type)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
break;
- case RTE_FC_RX_PAUSE:
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
return -ENOTSUP;
}
return -EINVAL;
}
- num = tbl_sz / RTE_RETA_GROUP_SIZE;
+ num = tbl_sz / RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if (reta_conf[i].mask & ((uint64_t)1 << j)) {
- index = (i * RTE_RETA_GROUP_SIZE) + j;
+ index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
}
}
return -EINVAL;
}
- num = reta_size / RTE_RETA_GROUP_SIZE;
+ num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
memcpy(reta_conf->reta,
- &lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
- RTE_RETA_GROUP_SIZE);
+ &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE],
+ RTE_ETH_RETA_GROUP_SIZE);
reta_conf++;
}
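On the application side, RETA entries are still grouped 64 at a time; a sketch (hypothetical helper; assumes dev_info.reta_size is at most 512 and a multiple of RTE_ETH_RETA_GROUP_SIZE) that spreads the table across nb_queues queues:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Fill the whole redirection table round-robin over nb_queues. */
static int
spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
						  RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   dev_info.reta_size);
}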
IONIC_RSS_HASH_KEY_SIZE);
if (lif->rss_types & IONIC_RSS_TYPE_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (lif->rss_types & IONIC_RSS_TYPE_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
rss_conf->rss_hf = rss_hf;
if (!lif->rss_ind_tbl)
return -EINVAL;
- if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
rss_types |= IONIC_RSS_TYPE_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
rss_types |= IONIC_RSS_TYPE_IPV4_TCP;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
rss_types |= IONIC_RSS_TYPE_IPV4_UDP;
- if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
rss_types |= IONIC_RSS_TYPE_IPV6;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
rss_types |= IONIC_RSS_TYPE_IPV6_TCP;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
rss_types |= IONIC_RSS_TYPE_IPV6_UDP;
ionic_lif_rss_config(lif, rss_types, key, NULL);
static inline uint32_t
ionic_parse_link_speeds(uint16_t link_speeds)
{
- if (link_speeds & ETH_LINK_SPEED_100G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100G)
return 100000;
- else if (link_speeds & ETH_LINK_SPEED_50G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_50G)
return 50000;
- else if (link_speeds & ETH_LINK_SPEED_40G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_40G)
return 40000;
- else if (link_speeds & ETH_LINK_SPEED_25G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_25G)
return 25000;
- else if (link_speeds & ETH_LINK_SPEED_10G)
+ else if (link_speeds & RTE_ETH_LINK_SPEED_10G)
return 10000;
else
return 0;
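The capability bits consumed by this helper are set by the application at configure time; a minimal sketch (hypothetical helper; single queue pair for brevity) requesting a fixed 25 Gbps link:

#include <string.h>
#include <rte_ethdev.h>

/* Ask for a fixed 25 Gbps link; without RTE_ETH_LINK_SPEED_FIXED the
 * speed bits act only as an autonegotiation allow-list. */
static int
configure_fixed_25g(uint16_t port_id)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
				RTE_ETH_LINK_SPEED_25G;

	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}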
IONIC_PRINT_CALL();
allowed_speeds =
- ETH_LINK_SPEED_FIXED |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ RTE_ETH_LINK_SPEED_FIXED |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
if (dev_conf->link_speeds & ~allowed_speeds) {
IONIC_PRINT(ERR, "Invalid link setting");
}
/* Configure link */
- an_enable = (dev_conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+ an_enable = (dev_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
ionic_dev_cmd_port_autoneg(idev, an_enable);
err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
#include <rte_ethdev.h>
#define IONIC_ETH_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \
(eth_dev)->data->dev_private)
/*
* IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
- * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
+ * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
*/
- rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
else
lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
/*
* NB: While it is true that RSS_HASH is always enabled on ionic,
* setting this flag unconditionally causes problems in DTS.
- * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
*/
/* RX per-port */
- if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
- rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
- rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
+ rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
+ rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
lif->features |= IONIC_ETH_HW_RX_CSUM;
else
lif->features &= ~IONIC_ETH_HW_RX_CSUM;
- if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
lif->features |= IONIC_ETH_HW_RX_SG;
lif->eth_dev->data->scattered_rx = 1;
} else {
}
/* Covers VLAN_STRIP */
- ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
+ ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
/* TX per-port */
- if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
lif->features |= IONIC_ETH_HW_TX_CSUM;
else
lif->features &= ~IONIC_ETH_HW_TX_CSUM;
- if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
else
lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
- if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
lif->features |= IONIC_ETH_HW_TX_SG;
else
lif->features &= ~IONIC_ETH_HW_TX_SG;
- if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
lif->features |= IONIC_ETH_HW_TSO;
lif->features |= IONIC_ETH_HW_TSO_IPV6;
lif->features |= IONIC_ETH_HW_TSO_ECN;
txq->flags |= IONIC_QCQ_F_DEFERRED;
/* Convert the offload flags into queue flags */
- if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
txq->flags |= IONIC_QCQ_F_CSUM_L3;
- if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
txq->flags |= IONIC_QCQ_F_CSUM_TCP;
- if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+ if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
txq->flags |= IONIC_QCQ_F_CSUM_UDP;
eth_dev->data->tx_queues[tx_queue_id] = txq;
/*
* Note: the interface does not currently support
- * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
+ * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
* when the adapter will be able to keep the CRC and subtract
* it to the length for all received packets:
* if (eth_dev->data->dev_conf.rxmode.offloads &
- * DEV_RX_OFFLOAD_KEEP_CRC)
+ * RTE_ETH_RX_OFFLOAD_KEEP_CRC)
* rxq->crc_len = ETHER_CRC_LEN;
*/
dev_info->speed_capa =
(hw->retimer.mac_type ==
IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
- ETH_LINK_SPEED_10G :
+ RTE_ETH_LINK_SPEED_10G :
((hw->retimer.mac_type ==
IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
- ETH_LINK_SPEED_25G :
- ETH_LINK_SPEED_AUTONEG);
+ RTE_ETH_LINK_SPEED_25G :
+ RTE_ETH_LINK_SPEED_AUTONEG);
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
};
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER;
-
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
dev_info->tx_queue_offload_capa;
dev_info->dev_capa =
(uint64_t *)&link_speed);
switch (link_speed) {
case IFPGA_RAWDEV_LINK_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case IFPGA_RAWDEV_LINK_SPEED_25GB:
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
break;
default:
IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
rawdev = hw->rawdev;
ipn3ke_update_link(rawdev, rpst->port_id, &link);
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
rawdev = hw->rawdev;
ipn3ke_update_link(rawdev, rpst->port_id, &link);
qinq &= IXGBE_DMATXCTL_GDV;
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
+ case RTE_ETH_VLAN_TYPE_INNER:
if (qinq) {
reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
" by single VLAN");
}
break;
- case ETH_VLAN_TYPE_OUTER:
+ case RTE_ETH_VLAN_TYPE_OUTER:
if (qinq) {
/* Only the high 16-bits is valid */
IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
rxq->vlan_flags = PKT_RX_VLAN;
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
PMD_INIT_FUNC_TRACE();
if (hw->mac.type == ixgbe_mac_82598EB) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
ctrl |= IXGBE_RXDCTL_VME;
on = TRUE;
} else {
struct rte_eth_rxmode *rxmode;
struct ixgbe_rx_queue *rxq;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
rxmode = &dev->data->dev_conf.rxmode;
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
else
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
}
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
ixgbe_vlan_hw_strip_config(dev);
- }
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
switch (nb_rx_q) {
case 1:
case 2:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
break;
case 4:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
break;
default:
return -EINVAL;
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
break;
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
" unsupported mq_mode rx %d.",
dev_conf->rxmode.mq_mode);
return -EINVAL;
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
return -EINVAL;
}
break;
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_NONE:
/* if nothing mq mode configure, use default scheme */
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
break;
- default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(ERR, "SRIOV is active,"
" wrong mq_mode rx %d.",
}
switch (dev_conf->txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
break;
- default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
break;
}
return -EINVAL;
}
} else {
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
" not supported.");
return -EINVAL;
}
/* check configuration for vmdq+dcb mode */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
return -EINVAL;
}
conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools must be %d or %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
return -EINVAL;
}
conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools != %d and"
" nb_queue_pools != %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
/* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
* When DCB/VT is off, maximum number of queues changes,
* except for 82598EB, which remains constant.
*/
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
hw->mac.type != ixgbe_mac_82598EB) {
if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
PMD_INIT_LOG(ERR,
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
goto error;
}
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = ixgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
}
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
/* Enable vlan filtering for VMDq */
ixgbe_vmdq_vlan_hw_filter_enable(dev);
}
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
- ETH_LINK_SPEED_10G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G |
+ RTE_ETH_LINK_SPEED_10G;
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
- allowed_speeds = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
break;
default:
- allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G;
}
link_speeds = &dev->data->dev_conf.link_speeds;
}
speed = 0x0;
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
speed = IXGBE_LINK_SPEED_82598_AUTONEG;
speed = IXGBE_LINK_SPEED_82599_AUTONEG;
}
} else {
- if (*link_speeds & ETH_LINK_SPEED_10G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
speed |= IXGBE_LINK_SPEED_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_1G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_100M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
speed |= IXGBE_LINK_SPEED_100_FULL;
- if (*link_speeds & ETH_LINK_SPEED_10M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
speed |= IXGBE_LINK_SPEED_10_FULL;
}
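
For reference, a minimal sketch of how an application requests a fixed link speed through the renamed RTE_ETH_LINK_SPEED_* flags that the hunk above maps to IXGBE speeds; the port id, queue counts and helper name are assumptions for illustration, not part of this patch:

#include <string.h>
#include <rte_ethdev.h>

static int
set_fixed_10g(uint16_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        /* RTE_ETH_LINK_SPEED_FIXED disables autonegotiation for the
         * speed bits selected alongside it.
         */
        conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}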
* When DCB/VT is off, maximum number of queues changes,
* except for 82598EB, which remains constant.
*/
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE &&
hw->mac.type != ixgbe_mac_82598EB)
dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
}
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
else
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
- dev_info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G;
if (hw->mac.type == ixgbe_mac_X540 ||
hw->mac.type == ixgbe_mac_X540_vf ||
hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550_vf) {
- dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
}
if (hw->mac.type == ixgbe_mac_X550) {
- dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
- dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
}
/* Driver-preferred Rx/Tx parameters */
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
- dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
else
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
u32 esdp_reg;
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
hw->mac.get_link_status = true;
diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
if (diag != 0) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return rte_eth_linkstatus_set(dev, &link);
}
return rte_eth_linkstatus_set(dev, &link);
}
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (link_speed) {
default:
case IXGBE_LINK_SPEED_UNKNOWN:
- link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
break;
case IXGBE_LINK_SPEED_10_FULL:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case IXGBE_LINK_SPEED_100_FULL:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_2_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case IXGBE_LINK_SPEED_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
}
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
tx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
}
for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IXGBE_4_BIT_MASK);
if (!mask)
}
for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IXGBE_4_BIT_MASK);
if (!mask)
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#endif
ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = ixgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
int on = 0;
/* VF function only support hw strip feature, others are not support */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
return -ENOTSUP;
if (on) {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = ~0;
IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
}
} else {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = 0;
IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
}
{
uint32_t new_val = orig_val;
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
new_val |= IXGBE_VMOLR_AUPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
new_val |= IXGBE_VMOLR_ROMPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
new_val |= IXGBE_VMOLR_ROPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
new_val |= IXGBE_VMOLR_BAM;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
new_val |= IXGBE_VMOLR_MPE;
return new_val;
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
incval = IXGBE_INCVAL_100;
shift = IXGBE_INCVAL_SHIFT_100;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
incval = IXGBE_INCVAL_1GB;
shift = IXGBE_INCVAL_SHIFT_1GB;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
default:
incval = IXGBE_INCVAL_10GB;
shift = IXGBE_INCVAL_SHIFT_10GB;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- return ETH_RSS_RETA_SIZE_512;
+ return RTE_ETH_RSS_RETA_SIZE_512;
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
- return ETH_RSS_RETA_SIZE_64;
+ return RTE_ETH_RSS_RETA_SIZE_64;
case ixgbe_mac_X540_vf:
case ixgbe_mac_82599_vf:
return 0;
default:
- return ETH_RSS_RETA_SIZE_128;
+ return RTE_ETH_RSS_RETA_SIZE_128;
}
}
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128)
return IXGBE_RETA(reta_idx >> 2);
else
- return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2);
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
uint8_t nb_tcs;
uint8_t i, j;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
else
dcb_info->nb_tcs = 1;
if (dcb_config->vt_mode) { /* vt is enabled*/
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
for (j = 0; j < nb_tcs; j++) {
} else { /* vt is disabled*/
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
- if (dcb_info->nb_tcs == ETH_4_TCS) {
+ if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
- } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
}
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
break;
default:
return ret;
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
break;
default:
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -EINVAL;
break;
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -EINVAL;
break;
#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
#define IXGBE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */
#define IXGBE_VF_MAXMSIVECTOR 1
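
For reference, a minimal sketch of selecting RSS hash types with the renamed RTE_ETH_RSS_* bits that IXGBE_RSS_OFFLOAD_ALL is built from; the port id and the chosen hash fields are assumptions for illustration:

#include <rte_ethdev.h>

static int
enable_tcp_rss(uint16_t port_id)
{
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = NULL,        /* keep the driver's default key */
                .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                          RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        };

        /* The PMD masks rss_hf against its advertised
         * flow_type_rss_offloads before programming the hardware.
         */
        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}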
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc);
+ enum rte_eth_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc);
+ enum rte_eth_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, uint8_t queue,
uint32_t fdircmd, uint32_t fdirhash,
* flexbytes matching field, and drop queue (only for perfect matching mode).
*/
static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
{
*fdirctrl = 0;
switch (conf->pballoc) {
- case RTE_FDIR_PBALLOC_64K:
+ case RTE_ETH_FDIR_PBALLOC_64K:
/* 8k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
break;
- case RTE_FDIR_PBALLOC_128K:
+ case RTE_ETH_FDIR_PBALLOC_128K:
/* 16k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
break;
- case RTE_FDIR_PBALLOC_256K:
+ case RTE_ETH_FDIR_PBALLOC_256K:
/* 32k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
break;
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum rte_eth_fdir_pballoc_type pballoc)
{
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_128KB_HASH_MASK;
*/
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum rte_eth_fdir_pballoc_type pballoc)
{
uint32_t bucket_hash, sig_hash;
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_128KB_HASH_MASK;
return -rte_errno;
}
- filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
/**
* grp and e_cid_base are bit fields and only use 14 bits.
* e-tag id is taken as little endian by HW.
aead_xform = &conf->crypto_xform->aead;
if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
return -ENOTSUP;
}
} else {
- if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
return -1;
}
}
- if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
- if (vf_num >= ETH_32_POOLS) {
+ if (vf_num >= RTE_ETH_32_POOLS) {
nb_queue = 2;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
- } else if (vf_num >= ETH_16_POOLS) {
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+ } else if (vf_num >= RTE_ETH_16_POOLS) {
nb_queue = 4;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
} else {
nb_queue = 8;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
}
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
gpie |= IXGBE_GPIE_VTMODE_64;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
gpie |= IXGBE_GPIE_VTMODE_32;
break;
- case ETH_16_POOLS:
+ case RTE_ETH_16_POOLS:
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
gpie |= IXGBE_GPIE_VTMODE_16;
break;
/* Notify VF of number of DCB traffic classes */
eth_conf = &dev->data->dev_conf;
switch (eth_conf->txmode.mq_mode) {
- case ETH_MQ_TX_NONE:
- case ETH_MQ_TX_DCB:
+ case RTE_ETH_MQ_TX_NONE:
+ case RTE_ETH_MQ_TX_DCB:
PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
", but its tx mode = %d\n", vf,
eth_conf->txmode.mq_mode);
return -1;
- case ETH_MQ_TX_VMDQ_DCB:
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
switch (vmdq_dcb_tx_conf->nb_queue_pools) {
- case ETH_16_POOLS:
- num_tcs = ETH_8_TCS;
+ case RTE_ETH_16_POOLS:
+ num_tcs = RTE_ETH_8_TCS;
break;
- case ETH_32_POOLS:
- num_tcs = ETH_4_TCS;
+ case RTE_ETH_32_POOLS:
+ num_tcs = RTE_ETH_4_TCS;
break;
default:
return -1;
}
break;
- /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
- case ETH_MQ_TX_VMDQ_ONLY:
+ /* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+ case RTE_ETH_MQ_TX_VMDQ_ONLY:
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540)
- tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
- tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
- tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
#endif
return tx_offload_capa;
}
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIB_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_SECURITY);
+ RTE_ETH_TX_OFFLOAD_SECURITY);
#endif
/*
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type != ixgbe_mac_82598EB)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
return offloads;
}
uint64_t offloads;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_RSS_HASH;
+ offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (hw->mac.type == ixgbe_mac_82598EB)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (ixgbe_is_vf(dev) == 0)
- offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
/*
* RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
hw->mac.type == ixgbe_mac_X540 ||
hw->mac.type == ixgbe_mac_X550) &&
!RTE_ETH_DEV_SRIOV(dev).active)
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540)
- offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
- offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
- offloads |= DEV_RX_OFFLOAD_SECURITY;
+ offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
#endif
return offloads;
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
/* Set configured hashing protocols in MRQC register */
rss_hf = rss_conf->rss_hf;
mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
- if (rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
- if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
}
rss_hf = 0;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
- rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
return 0;
}
cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
num_pools = cfg->nb_queue_pools;
/* Check we have a valid number of pools */
- if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+ if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
ixgbe_rss_disable(dev);
return;
}
/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
- nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+ nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
/*
* RXPBSIZE
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* zero alloc all unused TCs */
- for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
}
/* MRQC: enable vmdq and dcb */
- mrqc = (num_pools == ETH_16_POOLS) ?
+ mrqc = (num_pools == RTE_ETH_16_POOLS) ?
IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
queue_mapping = 0;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
/*
* mapping is done with 3 bits per priority,
* so shift by i*3 each time
/* VFRE: pool enabling for receive - 16 or 32 */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
- num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+ num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*
* MPSAR - allow pools to read specific mac addresses
if (hw->mac.type != ixgbe_mac_82598EB)
/*PF VF Transmit Enable*/
IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
- vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+ vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
ixgbe_dcb_tx_hw_config(dev, dcb_config);
uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
- dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
} else {
- dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
}
/* Initialize User Priority to Traffic Class mapping */
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
- dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
} else {
- dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
}
/* Initialize User Priority to Traffic Class mapping */
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
if (hw->mac.type != ixgbe_mac_82598EB) {
config_dcb_rx = DCB_RX_CONFIG;
ixgbe_vmdq_dcb_configure(dev);
}
break;
- case ETH_MQ_RX_DCB:
- case ETH_MQ_RX_DCB_RSS:
+ case RTE_ETH_MQ_RX_DCB:
+ case RTE_ETH_MQ_RX_DCB_RSS:
dcb_config->vt_mode = false;
config_dcb_rx = DCB_RX_CONFIG;
/* Get dcb TX configuration parameters from rte_eth_conf */
break;
}
switch (dev->data->dev_conf.txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
/* get DCB and VT TX configuration parameters
ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
break;
- case ETH_MQ_TX_DCB:
+ case RTE_ETH_MQ_TX_DCB:
dcb_config->vt_mode = false;
config_dcb_tx = DCB_TX_CONFIG;
/*get DCB TX configuration parameters from rte_eth_conf*/
nb_tcs = dcb_config->num_tcs.pfc_tcs;
/* Unpack map */
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
- if (nb_tcs == ETH_4_TCS) {
+ if (nb_tcs == RTE_ETH_4_TCS) {
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
mask = (uint8_t)(mask & (~(1 << map[i])));
for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
- if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+ if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
map[j++] = i;
mask >>= 1;
}
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* zero alloc all unused TCs */
- for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- }
}
if (config_dcb_tx) {
/* Only support an equally distributed
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
}
/* Clear unused TCs, if any, to zero buffer size*/
- for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
}
ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
/* Check if the PFC is supported */
- if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
for (i = 0; i < nb_tcs; i++) {
/*
tc->pfc = ixgbe_dcb_pfc_enabled;
}
ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
- if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
pfc_en &= 0x0F;
ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
}
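
For reference, a minimal sketch of the port configuration that reaches the DCB paths checked below, using the renamed RTE_ETH_MQ_*, RTE_ETH_8_TCS and RTE_ETH_DCB_PFC_SUPPORT names; the identity priority-to-TC mapping is an assumption for illustration:

#include <string.h>
#include <rte_ethdev.h>

static void
fill_dcb_conf(struct rte_eth_conf *conf)
{
        uint8_t up;

        memset(conf, 0, sizeof(*conf));
        conf->rxmode.mq_mode = RTE_ETH_MQ_RX_DCB;
        conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
        conf->dcb_capability_en = RTE_ETH_DCB_PFC_SUPPORT;
        conf->rx_adv_conf.dcb_rx_conf.nb_tcs = RTE_ETH_8_TCS;
        conf->tx_adv_conf.dcb_tx_conf.nb_tcs = RTE_ETH_8_TCS;

        /* Map the 8 user priorities one-to-one onto traffic classes. */
        for (up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++) {
                conf->rx_adv_conf.dcb_rx_conf.dcb_tc[up] = up;
                conf->tx_adv_conf.dcb_tx_conf.dcb_tc[up] = up;
        }
}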
PMD_INIT_FUNC_TRACE();
/* check support mq_mode for DCB */
- if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
- (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
- (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+ if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+ dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+ dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
return;
- if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+ if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
/* VFRE: pool enabling for receive - 64 */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
- if (num_pools == ETH_64_POOLS)
+ if (num_pools == RTE_ETH_64_POOLS)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
/*
mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
mrqc &= ~IXGBE_MRQC_MRQE_MASK;
switch (RTE_ETH_DEV_SRIOV(dev).active) {
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
mrqc |= IXGBE_MRQC_VMDQRSS64EN;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
mrqc |= IXGBE_MRQC_VMDQRSS32EN;
break;
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
switch (RTE_ETH_DEV_SRIOV(dev).active) {
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
IXGBE_WRITE_REG(hw, IXGBE_MRQC,
IXGBE_MRQC_VMDQEN);
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
IXGBE_WRITE_REG(hw, IXGBE_MRQC,
IXGBE_MRQC_VMDQRT4TCEN);
break;
- case ETH_16_POOLS:
+ case RTE_ETH_16_POOLS:
IXGBE_WRITE_REG(hw, IXGBE_MRQC,
IXGBE_MRQC_VMDQRT8TCEN);
break;
* any DCB/RSS w/o VMDq multi-queue setting
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_DCB_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
ixgbe_rss_configure(dev);
break;
- case ETH_MQ_RX_VMDQ_DCB:
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
ixgbe_vmdq_dcb_configure(dev);
break;
- case ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
ixgbe_vmdq_rx_hw_configure(dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
default:
/* if mq_mode is none, disable rss mode.*/
ixgbe_rss_disable(dev);
* Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
ixgbe_config_vf_rss(dev);
break;
- case ETH_MQ_RX_VMDQ_DCB:
- case ETH_MQ_RX_DCB:
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
+ case RTE_ETH_MQ_RX_DCB:
/* In SRIOV, the configuration is the same as VMDq case */
ixgbe_vmdq_dcb_configure(dev);
break;
/* DCB/RSS together with SRIOV is not supported */
- case ETH_MQ_RX_VMDQ_DCB_RSS:
- case ETH_MQ_RX_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
* SRIOV inactive scheme
* any DCB w/o VMDq multi-queue setting
*/
- if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+ if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
ixgbe_vmdq_tx_hw_configure(hw);
else {
mtqc = IXGBE_MTQC_64Q_1PB;
* SRIOV active scheme
* FIXME if support DCB together with VMDq & SRIOV
*/
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
break;
- case ETH_16_POOLS:
+ case RTE_ETH_16_POOLS:
mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
IXGBE_MTQC_8TC_8TQ;
break;
rxq->rx_using_sse = rx_using_sse;
#ifdef RTE_LIB_SECURITY
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SECURITY);
+ RTE_ETH_RX_OFFLOAD_SECURITY);
#endif
}
}
/* Sanity check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
- (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+ (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
rfctl &= ~IXGBE_RFCTL_RSC_DIS;
else
rfctl |= IXGBE_RFCTL_RSC_DIS;
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
* Assume no header split and no VLAN strip support
* on any Rx queue first.
*/
- rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
/* It adds dual VLAN length for supporting dual VLAN */
if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
#ifdef RTE_LIB_SECURITY
if ((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SECURITY) ||
+ RTE_ETH_RX_OFFLOAD_SECURITY) ||
(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_SECURITY)) {
+ RTE_ETH_TX_OFFLOAD_SECURITY)) {
ret = ixgbe_crypto_enable_ipsec(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR,
* Assume no header split and no VLAN strip support
* on any Rx queue first.
*/
- rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
dev->data->scattered_rx = 1;
}
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
/* Set RQPL for VF RSS according to max Rx queue */
uint8_t rx_udp_csum_zero_err;
/** flags to set in mbuf when a vlan is detected. */
uint64_t vlan_flags;
- uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
uint8_t nb_tcs = 0;
eth_conf = &dev->data->dev_conf;
- if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
- } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ } else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
- ETH_32_POOLS)
- nb_tcs = ETH_4_TCS;
+ RTE_ETH_32_POOLS)
+ nb_tcs = RTE_ETH_4_TCS;
else
- nb_tcs = ETH_8_TCS;
+ nb_tcs = RTE_ETH_8_TCS;
} else {
nb_tcs = 1;
}
if (vf_num) {
/* no DCB */
if (nb_tcs == 1) {
- if (vf_num >= ETH_32_POOLS) {
+ if (vf_num >= RTE_ETH_32_POOLS) {
*nb = 2;
*base = vf_num * 2;
- } else if (vf_num >= ETH_16_POOLS) {
+ } else if (vf_num >= RTE_ETH_16_POOLS) {
*nb = 4;
*base = vf_num * 4;
} else {
}
} else {
/* VT off */
- if (nb_tcs == ETH_8_TCS) {
+ if (nb_tcs == RTE_ETH_8_TCS) {
switch (tc_node_no) {
case 0:
*base = 0;
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
/**< Maximum number of MAC addresses. */
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
/**< Device RX offload capabilities. */
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/**< Device TX offload capabilities. */
dev_info->speed_capa =
representor->pf_ethdev->data->dev_link.link_speed;
- /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+ /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
dev_info->switch_info.name =
representor->pf_ethdev->device->name;
*/
if (hw->mac.type == ixgbe_mac_82598EB)
queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
- ETH_16_POOLS;
+ RTE_ETH_16_POOLS;
else
queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
- ETH_64_POOLS;
+ RTE_ETH_64_POOLS;
for (q = 0; q < queues_per_pool; q++)
(*dev->dev_ops->vlan_strip_queue_set)(dev,
bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
eth_conf = &dev->data->dev_conf;
- if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
- } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ } else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
- ETH_32_POOLS)
- nb_tcs = ETH_4_TCS;
+ RTE_ETH_32_POOLS)
+ nb_tcs = RTE_ETH_4_TCS;
else
- nb_tcs = ETH_8_TCS;
+ nb_tcs = RTE_ETH_8_TCS;
} else {
nb_tcs = 1;
}
* @param rx_mask
* The RX mode mask, which is one or more of accepting Untagged Packets,
* packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-* ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-* ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+* RTE_ETH_VMDQ_ACCEPT_UNTAG, RTE_ETH_VMDQ_ACCEPT_HASH_UC,
+* RTE_ETH_VMDQ_ACCEPT_BROADCAST and RTE_ETH_VMDQ_ACCEPT_MULTICAST will be used
* in rx_mode.
* @param on
* 1 - Enable a VF RX mode.
};
static const struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
static int is_kni_initialized;
case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
- devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
break;
/* CN23xx 25G cards */
case PCI_SUBSYS_DEV_ID_CN2350_225:
case PCI_SUBSYS_DEV_ID_CN2360_225:
- devinfo->speed_capa = ETH_LINK_SPEED_25G;
+ devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;
break;
default:
- devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
lio_dev_err(lio_dev,
"Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
return -EINVAL;
devinfo->max_mac_addrs = 1;
- devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_RSS_HASH);
- devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+ devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
+ devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
devinfo->rx_desc_lim = lio_rx_desc_lim;
devinfo->tx_desc_lim = lio_tx_desc_lim;
devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
- devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_IPV6 |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_EX |
- ETH_RSS_IPV6_TCP_EX);
+ devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_EX |
+ RTE_ETH_RSS_IPV6_TCP_EX);
return 0;
}
rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
- for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
- index = (i * RTE_RETA_GROUP_SIZE) + j;
+ index = (i * RTE_ETH_RETA_GROUP_SIZE) + j;
rss_state->itable[index] = reta_conf[i].reta[j];
}
}
return -EINVAL;
}
- num = reta_size / RTE_RETA_GROUP_SIZE;
+ num = reta_size / RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
memcpy(reta_conf->reta,
- &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
- RTE_RETA_GROUP_SIZE);
+ &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],
+ RTE_ETH_RETA_GROUP_SIZE);
reta_conf++;
}
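
For reference, a minimal sketch of building an indirection table with the renamed RTE_ETH_RETA_GROUP_SIZE group indexing used in the hunks above; the round-robin queue spread and the 512-entry upper bound are assumptions for illustration:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        /* One rte_eth_rss_reta_entry64 covers RTE_ETH_RETA_GROUP_SIZE (64)
         * indirection entries; size the array for up to 512 entries.
         */
        struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
                                                  RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        if (reta_size > RTE_ETH_RSS_RETA_SIZE_512 || nb_queues == 0)
                return -1;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= UINT64_C(1) << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}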
memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
if (rss_state->ip)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (rss_state->tcp_hash)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (rss_state->ipv6)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= RTE_ETH_RSS_IPV6;
if (rss_state->ipv6_tcp_hash)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (rss_state->ipv6_ex)
- rss_hf |= ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (rss_state->ipv6_tcp_ex_hash)
- rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
rss_conf->rss_hf = rss_hf;
if (rss_state->hash_disable)
return -EINVAL;
- if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
hashinfo |= LIO_RSS_HASH_IPV4;
rss_state->ip = 1;
} else {
rss_state->ip = 0;
}
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
hashinfo |= LIO_RSS_HASH_TCP_IPV4;
rss_state->tcp_hash = 1;
} else {
rss_state->tcp_hash = 0;
}
- if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {
hashinfo |= LIO_RSS_HASH_IPV6;
rss_state->ipv6 = 1;
} else {
rss_state->ipv6 = 0;
}
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
hashinfo |= LIO_RSS_HASH_TCP_IPV6;
rss_state->ipv6_tcp_hash = 1;
} else {
rss_state->ipv6_tcp_hash = 0;
}
- if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {
hashinfo |= LIO_RSS_HASH_IPV6_EX;
rss_state->ipv6_ex = 1;
} else {
rss_state->ipv6_ex = 0;
}
- if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {
hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
rss_state->ipv6_tcp_ex_hash = 1;
} else {
if (udp_tnl == NULL)
return -EINVAL;
- if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
lio_dev_err(lio_dev, "Unsupported tunnel type\n");
return -1;
}
if (udp_tnl == NULL)
return -EINVAL;
- if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
lio_dev_err(lio_dev, "Unsupported tunnel type\n");
return -1;
}
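
For reference, a minimal sketch of registering a UDP tunnel port with the renamed RTE_ETH_TUNNEL_TYPE_VXLAN checked above; the port id is an assumption for illustration:

#include <rte_ethdev.h>

static int
add_vxlan_port(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel = {
                .udp_port = 4789,       /* IANA-assigned VXLAN port */
                .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}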
/* Initialize */
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
/* Return what we found */
if (lio_dev->linfo.link.s.link_up == 0) {
return rte_eth_linkstatus_set(eth_dev, &link);
}
- link.link_status = ETH_LINK_UP; /* Interface is up */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP; /* Interface is up */
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (lio_dev->linfo.link.s.speed) {
case LIO_LINK_SPEED_10000:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case LIO_LINK_SPEED_25000:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link.link_speed = RTE_ETH_SPEED_NUM_25G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
}
return rte_eth_linkstatus_set(eth_dev, &link);
q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
i % eth_dev->data->nb_rx_queues : 0);
- conf_idx = i / RTE_RETA_GROUP_SIZE;
- reta_idx = i % RTE_RETA_GROUP_SIZE;
+ conf_idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_ETH_RETA_GROUP_SIZE;
reta_conf[conf_idx].reta[reta_idx] = q_idx;
reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
}
struct rte_eth_rss_conf rss_conf;
switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
lio_dev_rss_configure(eth_dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/* if mq_mode is none, disable rss mode. */
default:
memset(&rss_conf, 0, sizeof(rss_conf));
}
lio_dev->linfo.link.s.link_up = 1;
- eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
}
lio_dev->linfo.link.s.link_up = 0;
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
lio_dev->linfo.link.s.link_up = 1;
- eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
lio_dev_err(lio_dev, "Unable to set Link Down\n");
return -1;
}
PMD_INIT_FUNC_TRACE();
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Inform firmware about change in number of queues to use.
* Disable IO queues and reset registers for re-configuration.
int i;
int ret;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
};
static const struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_AUTONEG
};
#define MEMIF_MP_SEND_REGION "memif_mp_send_region"
dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
dev_info->min_rx_bufsize = 0;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
}
MIF_LOG(INFO, "Connected.");
return 0;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
proc_private = dev->process_private;
- if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+ if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
proc_private->regions_num == 0) {
memif_mp_request_regions(dev);
- } else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+ } else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
proc_private->regions_num > 0) {
memif_free_regions(dev);
}
info->if_index = priv->if_index;
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
info->speed_capa =
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_56G;
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_20G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_56G;
info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
return 0;
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
- dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
else
dev_link.link_speed = link_speed;
dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
- ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
dev->data->dev_link = dev_link;
return 0;
}
}
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (ethpause.rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (ethpause.tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
ret = 0;
out:
MLX4_ASSERT(ret >= 0);
ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_RX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
ethpause.rx_pause = 1;
else
ethpause.rx_pause = 0;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_TX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
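(Illustrative aside, not part of the patch: the renamed RTE_ETH_FC_* enumerators are consumed the same way from the application side. A minimal sketch, assuming port_id refers to a valid configured port; the helper name is hypothetical:)

	#include <string.h>
	#include <rte_ethdev.h>

	/* Example only: request full (rx + tx) pause-frame flow control using
	 * the RTE_ETH_FC_* names introduced by this rename. */
	static int
	enable_full_fc_example(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc_conf;
		int ret;

		memset(&fc_conf, 0, sizeof(fc_conf));
		ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
		if (ret != 0)
			return ret;
		fc_conf.mode = RTE_ETH_FC_FULL;	/* formerly RTE_FC_FULL */
		return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	}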
};
static const uint64_t dpdk[] = {
[INNER] = 0,
- [IPV4] = ETH_RSS_IPV4,
- [IPV4_1] = ETH_RSS_FRAG_IPV4,
- [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
- [IPV6] = ETH_RSS_IPV6,
- [IPV6_1] = ETH_RSS_FRAG_IPV6,
- [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
- [IPV6_3] = ETH_RSS_IPV6_EX,
+ [IPV4] = RTE_ETH_RSS_IPV4,
+ [IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
+ [IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IPV6] = RTE_ETH_RSS_IPV6,
+ [IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
+ [IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IPV6_3] = RTE_ETH_RSS_IPV6_EX,
[TCP] = 0,
[UDP] = 0,
- [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
- [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
- [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
- [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
- [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
- [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+ [IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ [IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ [IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+ [IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
+ [IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ [IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
};
static const uint64_t verbs[RTE_DIM(dpdk)] = {
[INNER] = IBV_RX_HASH_INNER,
* - MAC flow rules are generated from @p dev->data->mac_addrs
* (@p priv->mac array).
* - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
+ * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
* is enabled and VLAN filters are configured.
*
* @param priv
struct rte_ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
!ETH_DEV(priv)->data->promiscuous ?
&vlan_spec.tci :
NULL;
static void
mlx4_link_status_alarm(struct mlx4_priv *priv)
{
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&ETH_DEV(priv)->data->dev_conf.intr_conf;
MLX4_ASSERT(priv->intr_alarm == 1);
};
uint32_t caught[RTE_DIM(type)] = { 0 };
struct ibv_async_event event;
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&ETH_DEV(priv)->data->dev_conf.intr_conf;
unsigned int i;
int
mlx4_intr_install(struct mlx4_priv *priv)
{
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&ETH_DEV(priv)->data->dev_conf.intr_conf;
int rc;
int
mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
- const struct rte_intr_conf *const intr_conf =
+ const struct rte_eth_intr_conf *const intr_conf =
&ETH_DEV(priv)->data->dev_conf.intr_conf;
if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
uint64_t
mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
- uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_RSS_HASH;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (priv->hw_csum)
- offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return offloads;
}
uint64_t
mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
{
- uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
(void)priv;
return offloads;
}
/* By default, FCS (CRC) is stripped by hardware. */
crc_present = 0;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
if (priv->hw_fcs_strip) {
crc_present = 1;
} else {
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
.csum = priv->hw_csum &&
- (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
.csum_l2tun = priv->hw_csum_l2tun &&
- (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
.crc_present = crc_present,
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
uint32_t sges_n;
uint64_t
mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
{
- uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+ uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
if (priv->hw_csum) {
- offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
}
if (priv->tso)
- offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (priv->hw_csum_l2tun) {
- offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (priv->tso)
- offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ offloads |= (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
}
return offloads;
}
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.csum = priv->hw_csum &&
- (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM)),
+ (offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)),
.csum_l2tun = priv->hw_csum_l2tun &&
(offloads &
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
.bounce_buf = bounce_buf,
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
- dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
else
dev_link.link_speed = link_speed;
priv->link_speed_capa = 0;
if (edata.supported & (SUPPORTED_1000baseT_Full |
SUPPORTED_1000baseKX_Full))
- priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (edata.supported & SUPPORTED_10000baseKR_Full)
- priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (edata.supported & (SUPPORTED_40000baseKR4_Full |
SUPPORTED_40000baseCR4_Full |
SUPPORTED_40000baseSR4_Full |
SUPPORTED_40000baseLR4_Full))
- priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
- ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
*link = dev_link;
return 0;
}
return ret;
}
dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
- ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+ RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
sc = ecmd->link_mode_masks[0] |
((uint64_t)ecmd->link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_1G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_10G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_20G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_40G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_56G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_25G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_50G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_100G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
sc = ecmd->link_mode_masks[2] |
((uint64_t)ecmd->link_mode_masks[3] << 32);
MLX5_BITSHIFT
(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
- priv->link_speed_capa |= ETH_LINK_SPEED_200G;
+ priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
- ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
*link = dev_link;
return 0;
}
}
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (ethpause.rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (ethpause.tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_RX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
ethpause.rx_pause = 1;
else
ethpause.rx_pause = 0;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_TX_PAUSE))
+ if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
* Remove this check once DPDK supports larger/variable
* indirection tables.
*/
- if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
- config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+ config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
config->ind_table_max_size);
config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
/*
* If HW has bug working with tunnel packet decapsulation and
* scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
- * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+ * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
*/
if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
config->hw_fcs_strip = 0;
struct rte_eth_udp_tunnel *udp_tunnel)
{
MLX5_ASSERT(udp_tunnel != NULL);
- if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+ if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
udp_tunnel->udp_port == 4789)
return 0;
- if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+ if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
udp_tunnel->udp_port == 4790)
return 0;
return -ENOTSUP;
struct mlx5_flow_rss_desc {
uint32_t level;
uint32_t queue_num; /**< Number of entries in @p queue. */
- uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
uint64_t hash_fields; /* Verbs Hash fields. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint32_t key_len; /**< RSS hash key len. */
#define MLX5_VPMD_DESCS_PER_LOOP 4
/* Mask of RSS on source only or destination only. */
-#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
+ RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
/* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
MLX5_RSS_SRC_DST_ONLY))
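(Illustrative aside, not part of the patch: MLX5_RSS_HF_MASK above is rebuilt from the renamed RTE_ETH_RSS_* bits, and it supports the usual "reject unknown hash types" check. A minimal sketch of that check; the supported set below is an example, not the driver's actual capability:)

	#include <errno.h>
	#include <stdint.h>
	#include <rte_ethdev.h>

	/* Example only: refuse rss_hf requests that contain bits outside a
	 * supported mask, the style of validation MLX5_RSS_HF_MASK enables. */
	static int
	check_rss_hf_example(uint64_t requested)
	{
		const uint64_t supported = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
					   RTE_ETH_RSS_TCP;

		if (requested & ~supported)
			return -ENOTSUP;
		return 0;
	}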
/* Timeout in seconds to get a valid link status. */
}
if ((dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
+ RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) &&
rte_mbuf_dyn_tx_timestamp_register(NULL, NULL) != 0) {
DRV_LOG(ERR, "port %u cannot register Tx timestamp field/flag",
dev->data->port_id);
info->default_txportconf.ring_size = 256;
info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST;
info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST;
- if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) |
- (priv->link_speed_capa & ETH_LINK_SPEED_100G)) {
+ if ((priv->link_speed_capa & RTE_ETH_LINK_SPEED_200G) |
+ (priv->link_speed_capa & RTE_ETH_LINK_SPEED_100G)) {
info->default_rxportconf.nb_queues = 16;
info->default_txportconf.nb_queues = 16;
if (dev->data->nb_rx_queues > 2 ||
uint64_t rss_types;
/**<
* RSS types bit-field associated with this node
- * (see ETH_RSS_* definitions).
+ * (see RTE_ETH_RSS_* definitions).
*/
uint64_t node_flags;
/**<
* @param[in] pattern
* User flow pattern.
* @param[in] types
- * RSS types to expand (see ETH_RSS_* definitions).
+ * RSS types to expand (see RTE_ETH_RSS_* definitions).
* @param[in] graph
* Input graph to expand @p pattern according to @p types.
* @param[in] graph_root_index
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
- .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
},
[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
},
[MLX5_EXPANSION_OUTER_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_NVGRE),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
- .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
},
[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_MPLS,
MLX5_EXPANSION_GTP),
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
},
[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
},
[MLX5_EXPANSION_VXLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
- .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
},
[MLX5_EXPANSION_IPV4_UDP] = {
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
},
[MLX5_EXPANSION_IPV4_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
},
[MLX5_EXPANSION_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
MLX5_EXPANSION_IPV6_FRAG_EXT),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
- .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER,
+ .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
},
[MLX5_EXPANSION_IPV6_UDP] = {
.type = RTE_FLOW_ITEM_TYPE_UDP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
},
[MLX5_EXPANSION_IPV6_TCP] = {
.type = RTE_FLOW_ITEM_TYPE_TCP,
- .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+ .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
},
[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
* @param[in] tunnel
* 1 when the hash field is for a tunnel item.
* @param[in] layer_types
- * ETH_RSS_* types.
+ * RTE_ETH_RSS_* types.
* @param[in] hash_fields
* Item hash fields.
*
&rss->types,
"some RSS protocols are not"
" supported");
- if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
- !(rss->types & ETH_RSS_IP))
+ if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+ !(rss->types & RTE_ETH_RSS_IP))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"L3 partial RSS requested but L3 RSS"
" type not specified");
- if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
- !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+ if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+ !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
"L4 partial RSS requested but L4 RSS"
* mlx5_flow_hashfields_adjust() in advance.
*/
rss_desc->level = rss->level;
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+ rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
}
flow->dev_handles = 0;
if (rss && rss->types) {
if (!priv->reta_idx_n || !priv->rxqs_n) {
return 0;
}
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "invalid port configuration");
- if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
ctx->action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
ctx->queue[i] = (*priv->reta_idx)[i];
/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_OTHER)
+ (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
- (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+ (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
else
dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
if (rss_types & MLX5_IPV6_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
else
dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
return;
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
- if (rss_types & ETH_RSS_UDP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ if (rss_types & RTE_ETH_RSS_UDP) {
+ if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
dev_flow->hash_fields |=
IBV_RX_HASH_SRC_PORT_UDP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
dev_flow->hash_fields |=
IBV_RX_HASH_DST_PORT_UDP;
else
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
- if (rss_types & ETH_RSS_TCP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ if (rss_types & RTE_ETH_RSS_TCP) {
+ if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
dev_flow->hash_fields |=
IBV_RX_HASH_SRC_PORT_TCP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
dev_flow->hash_fields |=
IBV_RX_HASH_DST_PORT_TCP;
else
case MLX5_RSS_HASH_IPV4:
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
*hash_field &= ~MLX5_RSS_HASH_IPV4;
- if (rss_types & ETH_RSS_L3_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_IPV4;
- else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_IPV4;
else
*hash_field |= MLX5_RSS_HASH_IPV4;
case MLX5_RSS_HASH_IPV6:
if (rss_types & MLX5_IPV6_LAYER_TYPES) {
*hash_field &= ~MLX5_RSS_HASH_IPV6;
- if (rss_types & ETH_RSS_L3_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_IPV6;
- else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_IPV6;
else
*hash_field |= MLX5_RSS_HASH_IPV6;
case MLX5_RSS_HASH_IPV4_UDP:
/* fall-through. */
case MLX5_RSS_HASH_IPV6_UDP:
- if (rss_types & ETH_RSS_UDP) {
+ if (rss_types & RTE_ETH_RSS_UDP) {
*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
- if (rss_types & ETH_RSS_L4_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
- else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
else
*hash_field |= MLX5_UDP_IBV_RX_HASH;
case MLX5_RSS_HASH_IPV4_TCP:
/* fall-through. */
case MLX5_RSS_HASH_IPV6_TCP:
- if (rss_types & ETH_RSS_TCP) {
+ if (rss_types & RTE_ETH_RSS_TCP) {
*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
- if (rss_types & ETH_RSS_L4_DST_ONLY)
+ if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
- else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
else
*hash_field |= MLX5_TCP_IBV_RX_HASH;
origin = &shared_rss->origin;
origin->func = rss->func;
origin->level = rss->level;
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- origin->types = !rss->types ? ETH_RSS_IP : rss->types;
+ /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+ origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
if (dev_flow->hash_fields != 0)
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
- (rss_desc, tunnel, ETH_RSS_TCP,
+ (rss_desc, tunnel, RTE_ETH_RSS_TCP,
(IBV_RX_HASH_SRC_PORT_TCP |
IBV_RX_HASH_DST_PORT_TCP));
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
if (dev_flow->hash_fields != 0)
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
- (rss_desc, tunnel, ETH_RSS_UDP,
+ (rss_desc, tunnel, RTE_ETH_RSS_UDP,
(IBV_RX_HASH_SRC_PORT_UDP |
IBV_RX_HASH_DST_PORT_UDP));
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
if (!(*priv->rxqs)[i])
continue;
(*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
- !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+ !!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS);
++idx;
}
return 0;
}
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
- idx = i / RTE_RETA_GROUP_SIZE;
- reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
(*priv->reta_idx)[i];
}
return 0;
if (ret)
return ret;
for (idx = 0, i = 0; (i != reta_size); ++i) {
- idx = i / RTE_RETA_GROUP_SIZE;
- pos = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ pos = i % RTE_ETH_RETA_GROUP_SIZE;
if (((reta_conf[idx].mask >> i) & 0x1) == 0)
continue;
MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
- uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_RSS_HASH);
+ uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
if (!config->mprq.enabled)
offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
if (config->hw_fcs_strip)
- offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
if (config->hw_csum)
- offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
+ offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
if (config->hw_vlan_strip)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (MLX5_LRO_SUPPORTED(dev))
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
return offloads;
}
uint64_t
mlx5_get_rx_port_offloads(void)
{
- uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
return offloads;
}
dev->data->dev_conf.rxmode.offloads;
/* The offloads should be checked on rte_eth_dev layer. */
- MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+ MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
DRV_LOG(ERR, "port %u queue index %u split "
"offload not configured",
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
- unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+ unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
unsigned int max_rx_pktlen = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
MLX5_ASSERT(tmpl->rxq.rxseg_n &&
tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
- if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
" configured and no enough mbuf space(%u) to contain "
"the maximum RX packet length(%u) with head-room(%u)",
config->mprq.stride_size_n : mprq_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_scatter_en =
- !!(offloads & DEV_RX_OFFLOAD_SCATTER);
+ !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pktlen,
MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
tmpl->rxq.sges_n = 0;
max_lro_size = max_rx_pktlen;
- } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
unsigned int sges_n;
if (lro_on_queue && first_mb_free_size <
}
mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
- tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
/* Configure Rx timestamp. */
- tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
tmpl->rxq.timestamp_rx_flag = 0;
if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
&tmpl->rxq.timestamp_offset,
goto error;
}
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
tmpl->rxq.lro = lro_on_queue;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
tmpl->rxq.crc_present << 2);
/* Save port ID. */
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
- (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+ (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
tmpl->rxq.mp = rx_seg[0].mp;
/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
- (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
/*
* Compile time sanity check for vectorized functions.
unsigned int diff = 0, olx = 0, i, m;
MLX5_ASSERT(priv);
- if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
/* We should support Multi-Segment Packets. */
olx |= MLX5_TXOFF_CONFIG_MULTI;
}
- if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) {
/* We should support TCP Send Offload. */
olx |= MLX5_TXOFF_CONFIG_TSO;
}
- if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
/* We should support Software Parser for Tunnels. */
olx |= MLX5_TXOFF_CONFIG_SWP;
}
- if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
/* We should support IP/TCP/UDP Checksums. */
olx |= MLX5_TXOFF_CONFIG_CSUM;
}
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
/* We should support VLAN insertion. */
olx |= MLX5_TXOFF_CONFIG_VLAN;
}
- if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
rte_mbuf_dynflag_lookup
(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL) >= 0 &&
rte_mbuf_dynfield_lookup
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT);
+ uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
struct mlx5_dev_config *config = &priv->config;
if (config->hw_csum)
- offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
if (config->tso)
- offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (config->tx_pp)
- offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+ offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
if (config->swp) {
if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
- offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (config->swp & MLX5_SW_PARSING_TSO_CAP)
- offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
}
if (config->tunnel_en) {
if (config->hw_csum)
- offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (config->tso) {
if (config->tunnel_en &
MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
- offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
if (config->tunnel_en &
MLX5_TUNNELED_OFFLOADS_GRE_CAP)
- offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+ offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
if (config->tunnel_en &
MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
- offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
}
}
if (!config->mprq.enabled)
- offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
return offloads;
}
unsigned int inlen_mode; /* Minimal required Inline data. */
unsigned int txqs_inline; /* Min Tx queues to enable inline. */
uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
- bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
bool vlan_inline;
unsigned int temp;
txq_ctrl->txq.fast_free =
- !!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
- !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+ !!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
!config->mprq.enabled);
if (config->txqs_inline == MLX5_ARG_UNSET)
txqs_inline =
* tx_burst routine.
*/
txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
- vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+ vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
!config->hw_vlan_insert;
/*
* If there are few Tx queues it is prioritized
MLX5_MAX_TSO_HEADER);
txq_ctrl->txq.tso_en = 1;
}
- if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+ if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
(config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
- ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+ ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
(config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
- ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+ ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
(config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
(config->swp & MLX5_SW_PARSING_TSO_CAP))
txq_ctrl->txq.tunnel_en = 1;
- txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+ txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
txq_ctrl->txq.offloads) && (config->swp &
MLX5_SW_PARSING_TSO_CAP)) |
- ((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
+ ((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
txq_ctrl->txq.offloads) && (config->swp &
MLX5_SW_PARSING_CSUM_CAP));
}
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
if (!priv->config.hw_vlan_strip) {
DRV_LOG(ERR, "port %u VLAN stripping is not supported",
* Remove this check once DPDK supports larger/variable
* indirection tables.
*/
- if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
- config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+ config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
config->ind_table_max_size);
if (config->hw_padding) {
struct mvneta_priv *priv = dev->data->dev_private;
struct neta_ppio_params *ppio_params;
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
if (dev->data->nb_rx_queues > 1)
return -EINVAL;
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
priv->multiseg = 1;
ppio_params = &priv->ppio_params;
mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *info)
{
- info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G;
+ info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G;
info->max_rx_queues = MRVL_NETA_RXQ_MAX;
info->max_tx_queues = MRVL_NETA_TXQ_MAX;
switch (ethtool_cmd_speed(&edata)) {
case SPEED_10:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case SPEED_100:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case SPEED_1000:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case SPEED_2500:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
default:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
}
- dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
- dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
- ETH_LINK_FIXED;
+ dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+ RTE_ETH_LINK_FIXED;
neta_ppio_get_link_state(priv->ppio, &link_up);
- dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
return 0;
}
#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN)
/** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_CHECKSUM)
/** Tx offloads capabilities */
-#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MVNETA_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
PKT_TX_TCP_CKSUM | \
rxq->priv = priv;
rxq->mp = mp;
rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_IPV4_CKSUM;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
rxq->size = desc;
#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
/** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_CHECKSUM)
/** Port Tx offloads capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
PKT_TX_TCP_CKSUM | \
if (rss_conf->rss_hf == 0) {
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
- } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ } else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
priv->ppio_params.inqs_params.hash_type =
PP2_PPIO_HASH_T_2_TUPLE;
- } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ } else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
priv->ppio_params.inqs_params.hash_type =
PP2_PPIO_HASH_T_5_TUPLE;
priv->rss_hf_tcp = 1;
- } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ } else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
priv->ppio_params.inqs_params.hash_type =
PP2_PPIO_HASH_T_5_TUPLE;
priv->rss_hf_tcp = 0;
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
- dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+ dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
return -EINVAL;
return -EINVAL;
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
priv->multiseg = 1;
ret = mrvl_configure_rxqs(priv, dev->data->port_id,
return ret;
if (dev->data->nb_rx_queues == 1 &&
- dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
priv->configured = 1;
int ret;
if (!priv->ppio) {
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
return ret;
}
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
int ret;
if (!priv->ppio) {
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
ret = pp2_ppio_disable(priv->ppio);
if (ret)
return ret;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
if (dev->data->all_multicast == 1)
mrvl_allmulticast_enable(dev);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
ret = mrvl_populate_vlan_table(dev, 1);
if (ret) {
MRVL_LOG(ERR, "Failed to populate VLAN table");
priv->flow_ctrl = 0;
}
- if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
ret = mrvl_dev_set_link_up(dev);
if (ret) {
MRVL_LOG(ERR, "Failed to set link up");
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
goto out;
}
}
switch (ethtool_cmd_speed(&edata)) {
case SPEED_10:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case SPEED_100:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case SPEED_1000:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case SPEED_2500:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case SPEED_10000:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
default:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
}
- dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
- dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
- ETH_LINK_FIXED;
+ dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+ RTE_ETH_LINK_FIXED;
pp2_ppio_get_link_state(priv->ppio, &link_up);
- dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
return 0;
}
{
struct mrvl_priv *priv = dev->data->dev_private;
- info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_2_5G |
- ETH_LINK_SPEED_10G;
+ info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_10G;
info->max_rx_queues = MRVL_PP2_RXQ_MAX;
info->max_tx_queues = MRVL_PP2_TXQ_MAX;
info->tx_offload_capa = MRVL_TX_OFFLOADS;
info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
- info->flow_type_rss_offloads = ETH_RSS_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_UDP;
+ info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP;
/* By default packets are dropped if no descriptors are available */
info->default_rxconf.rx_drop_en = 1;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
int ret;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
MRVL_LOG(ERR, "VLAN stripping is not supported\n");
return -ENOTSUP;
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = mrvl_populate_vlan_table(dev, 1);
else
ret = mrvl_populate_vlan_table(dev, 0);
return ret;
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
MRVL_LOG(ERR, "Extend VLAN not supported\n");
return -ENOTSUP;
}
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
return ret;
}
- fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+ fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
if (ret) {
}
if (en) {
- if (fc_conf->mode == RTE_FC_NONE)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ if (fc_conf->mode == RTE_ETH_FC_NONE)
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
}
return 0;
}
switch (fc_conf->mode) {
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
rx_en = 1;
tx_en = 1;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
rx_en = 0;
tx_en = 1;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
rx_en = 1;
tx_en = 0;
break;
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
rx_en = 0;
tx_en = 0;
break;
if (hash_type == PP2_PPIO_HASH_T_NONE)
rss_conf->rss_hf = 0;
else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
- rss_conf->rss_hf = ETH_RSS_IPV4;
+ rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
- rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
- rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
return 0;
}
eth_dev->dev_ops = &mrvl_ops;
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
- eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
rte_eth_dev_probing_finish(eth_dev);
return 0;
#include "hn_nvs.h"
#include "ndis.h"
-#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_VLAN_INSERT)
+#define HN_TX_OFFLOAD_CAPS (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
-#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_RSS_HASH)
+#define HN_RX_OFFLOAD_CAPS (RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define NETVSC_ARG_LATENCY "latency"
#define NETVSC_ARG_RXBREAK "rx_copybreak"
hn_rndis_get_linkspeed(hv);
link = (struct rte_eth_link) {
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_autoneg = ETH_LINK_SPEED_FIXED,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_autoneg = RTE_ETH_LINK_SPEED_FIXED,
.link_speed = hv->link_speed / 10000,
};
if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
else
- link.link_status = ETH_LINK_DOWN;
+ link.link_status = RTE_ETH_LINK_DOWN;
if (old.link_status == link.link_status)
return 0;
PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
- (link.link_status == ETH_LINK_UP) ? "up" : "down");
+ (link.link_status == RTE_ETH_LINK_UP) ? "up" : "down");
return rte_eth_linkstatus_set(dev, &link);
}
struct hn_data *hv = dev->data->dev_private;
int rc;
- dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
dev_info->max_mac_addrs = 1;
dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
dev_info->flow_type_rss_offloads = hv->rss_offloads;
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->max_rx_queues = hv->max_queues;
dev_info->max_tx_queues = hv->max_queues;
}
for (i = 0; i < NDIS_HASH_INDCNT; i++) {
- uint16_t idx = i / RTE_RETA_GROUP_SIZE;
- uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+ uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
uint64_t mask = (uint64_t)1 << shift;
if (reta_conf[idx].mask & mask)
}
for (i = 0; i < NDIS_HASH_INDCNT; i++) {
- uint16_t idx = i / RTE_RETA_GROUP_SIZE;
- uint16_t shift = i % RTE_RETA_GROUP_SIZE;
+ uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
uint64_t mask = (uint64_t)1 << shift;
if (reta_conf[idx].mask & mask)
/* Convert from DPDK RSS hash flags to NDIS hash flags */
hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
- if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4)
hv->rss_hash |= NDIS_HASH_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
hv->rss_hash |= NDIS_HASH_TCP_IPV4;
- if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6)
hv->rss_hash |= NDIS_HASH_IPV6;
- if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX)
hv->rss_hash |= NDIS_HASH_IPV6_EX;
- if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
hv->rss_hash |= NDIS_HASH_TCP_IPV6;
- if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;
memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
rss_conf->rss_hf = 0;
if (hv->rss_hash & NDIS_HASH_IPV4)
- rss_conf->rss_hf |= ETH_RSS_IPV4;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (hv->rss_hash & NDIS_HASH_IPV6)
- rss_conf->rss_hf |= ETH_RSS_IPV6;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
if (hv->rss_hash & NDIS_HASH_IPV6_EX)
- rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_EX;
if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
- rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
return 0;
}
PMD_INIT_FUNC_TRACE();
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
if (unsupported) {
return -EINVAL;
}
- hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ hv->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
err = hn_rndis_conf_offload(hv, txmode->offloads,
rxmode->offloads);
hv->rss_offloads = 0;
if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
- hv->rss_offloads |= ETH_RSS_IPV4
- | ETH_RSS_NONFRAG_IPV4_TCP
- | ETH_RSS_NONFRAG_IPV4_UDP;
+ hv->rss_offloads |= RTE_ETH_RSS_IPV4
+ | RTE_ETH_RSS_NONFRAG_IPV4_TCP
+ | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
- hv->rss_offloads |= ETH_RSS_IPV6
- | ETH_RSS_NONFRAG_IPV6_TCP;
+ hv->rss_offloads |= RTE_ETH_RSS_IPV6
+ | RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
- hv->rss_offloads |= ETH_RSS_IPV6_EX
- | ETH_RSS_IPV6_TCP_EX;
+ hv->rss_offloads |= RTE_ETH_RSS_IPV6_EX
+ | RTE_ETH_RSS_IPV6_TCP_EX;
/* Commit! */
*rxr_cnt0 = rxr_cnt;
params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
}
- if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
else
goto unsupported;
}
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) {
if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
== NDIS_RXCSUM_CAP_TCP4)
params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
goto unsupported;
}
- if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
else
goto unsupported;
}
- if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) {
if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
else
goto unsupported;
}
- if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
== NDIS_TXCSUM_CAP_IP4)
params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
else
goto unsupported;
}
- if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
else
goto unsupported;
}
- if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
else
return error;
}
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
== HN_NDIS_TXCSUM_CAP_IP4)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
== HN_NDIS_TXCSUM_CAP_TCP4 &&
(hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
== HN_NDIS_TXCSUM_CAP_TCP6)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
(hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
(hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
== HN_NDIS_LSOV2_CAP_IP6)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_RSS_HASH;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
(hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
(hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
return 0;
}
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
- dev_info->speed_capa = ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
return 0;
}
status.speed = MAC_SPEED_UNKNOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_status = ETH_LINK_DOWN;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;
if (internals->rxmac[0] != NULL) {
nc_rxmac_read_status(internals->rxmac[0], &status);
switch (status.speed) {
case MAC_SPEED_10G:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case MAC_SPEED_40G:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link.link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case MAC_SPEED_100G:
- link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
}
nc_rxmac_read_status(internals->rxmac[i], &status);
if (status.enabled && status.link_up) {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
break;
}
}
}
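The link fields being renamed here are also what applications read back after start-up. A minimal sketch, assuming a started port; show_link() and the port id are illustrative:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print the current link state of one port (sketch only). */
static void
show_link(uint16_t port_id)
{
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
                return;

        if (link.link_status == RTE_ETH_LINK_UP)
                printf("port %u up, %s, %s-duplex\n", port_id,
                       rte_eth_link_speed_to_str(link.link_speed),
                       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                       "full" : "half");
        else
                printf("port %u down\n", port_id);
}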
/* Timestamps are enabled when there is
* key-value pair: enable_timestamp=1
- * TODO: timestamp should be enabled with DEV_RX_OFFLOAD_TIMESTAMP
+ * TODO: timestamp should be enabled with RTE_ETH_RX_OFFLOAD_TIMESTAMP
*/
if (rte_kvargs_process(kvlist, TIMESTAMP_ARG,
timestamp_check_handler, NULL) < 0) {
rxmode = &dev_conf->rxmode;
txmode = &dev_conf->txmode;
- if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
- rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Checking TX mode */
if (txmode->mq_mode) {
}
/* Checking RX mode */
- if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
!(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
PMD_INIT_LOG(INFO, "RSS not supported");
return -EINVAL;
rxmode = &dev_conf->rxmode;
txmode = &dev_conf->txmode;
- if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
}
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
hw->mtu = dev->data->mtu;
- if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
/* L2 broadcast */
ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* TX checksum offload */
- if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
- txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
/* LSO offload */
- if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
if (hw->cap & NFP_NET_CFG_CTRL_LSO)
ctrl |= NFP_NET_CFG_CTRL_LSO;
else
}
/* RX gather */
- if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
ctrl |= NFP_NET_CFG_CTRL_GATHER;
return ctrl;
int ret;
static const uint32_t ls_to_ethtool[] = {
- [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
- [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
- [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
- [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
- [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
- [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
- [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
- [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = RTE_ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = RTE_ETH_SPEED_NUM_1G,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = RTE_ETH_SPEED_NUM_10G,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = RTE_ETH_SPEED_NUM_25G,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = RTE_ETH_SPEED_NUM_40G,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = RTE_ETH_SPEED_NUM_50G,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = RTE_ETH_SPEED_NUM_100G,
};
PMD_DRV_LOG(DEBUG, "Link update");
memset(&link, 0, sizeof(struct rte_eth_link));
if (nn_link_status & NFP_NET_CFG_STS_LINK)
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
NFP_NET_CFG_STS_LINK_RATE_MASK;
if (nn_link_status >= RTE_DIM(ls_to_ethtool))
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
else
link.link_speed = ls_to_ethtool[nn_link_status];
dev_info->max_mac_addrs = 1;
if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
};
if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_IPV6 |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_UDP;
+ dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP;
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
}
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
return 0;
}
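On the application side, the renamed capability and offload flags pair up in the usual probe-then-request pattern: read dev_info, only ask for what the port advertises. A minimal sketch, assuming DPDK headers are available; configure_port() and the particular offloads chosen are illustrative:

#include <string.h>
#include <rte_ethdev.h>

/* Request an offload only when the port advertises it (sketch only). */
static int
configure_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM)
                conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
        if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}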
if (link.link_status)
PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id, link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
? "full-duplex" : "half-duplex");
else
PMD_DRV_LOG(INFO, " Port %d: Link Down",
new_ctrl = 0;
/* Enable vlan strip if it is not configured yet */
- if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
+ if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
!(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
/* Disable vlan strip just if it is configured */
- if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
+ if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
*/
for (i = 0; i < reta_size; i += 4) {
/* Handling 4 RSS entries per loop */
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
if (!mask)
*/
for (i = 0; i < reta_size; i += 4) {
/* Handling 4 RSS entries per loop */
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
if (!mask)
rss_hf = rss_conf->rss_hf;
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
/* Propagate current RSS hash functions to caller */
rss_conf->rss_hf = rss_hf;
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
- if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
nfp_net_rss_config_default(dev);
update |= NFP_NET_CFG_UPDATE_RSS;
new_ctrl |= NFP_NET_CFG_CTRL_RSS;
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
- if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
nfp_net_rss_config_default(dev);
update |= NFP_NET_CFG_UPDATE_RSS;
new_ctrl |= NFP_NET_CFG_CTRL_RSS;
dev->data->dev_link.link_status = link_up;
link_speeds = &dev->data->dev_conf.link_speeds;
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG)
+ if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
negotiate = true;
err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
allowed_speeds = 0;
if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
- allowed_speeds |= ETH_LINK_SPEED_1G;
+ allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
- allowed_speeds |= ETH_LINK_SPEED_100M;
+ allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
- allowed_speeds |= ETH_LINK_SPEED_10M;
+ allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
if (*link_speeds & ~allowed_speeds) {
PMD_INIT_LOG(ERR, "Invalid link setting");
}
speed = 0x0;
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
speed = hw->mac.default_speeds;
} else {
- if (*link_speeds & ETH_LINK_SPEED_1G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
speed |= NGBE_LINK_SPEED_1GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_100M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
speed |= NGBE_LINK_SPEED_100M_FULL;
- if (*link_speeds & ETH_LINK_SPEED_10M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
speed |= NGBE_LINK_SPEED_10M_FULL;
}
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_10M;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_10M;
/* Driver-preferred Rx/Tx parameters */
dev_info->default_rxportconf.burst_size = 32;
int wait = 1;
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ~ETH_LINK_SPEED_AUTONEG);
+ ~RTE_ETH_LINK_SPEED_AUTONEG);
hw->mac.get_link_status = true;
err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
if (err != 0) {
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return rte_eth_linkstatus_set(dev, &link);
}
return rte_eth_linkstatus_set(dev, &link);
intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (link_speed) {
default:
case NGBE_LINK_SPEED_UNKNOWN:
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
case NGBE_LINK_SPEED_10M_FULL:
- link.link_speed = ETH_SPEED_NUM_10M;
+ link.link_speed = RTE_ETH_SPEED_NUM_10M;
lan_speed = 0;
break;
case NGBE_LINK_SPEED_100M_FULL:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
lan_speed = 1;
break;
case NGBE_LINK_SPEED_1GB_FULL:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
lan_speed = 2;
break;
}
rte_eth_linkstatus_get(dev, &link);
- if (link.link_status == ETH_LINK_UP) {
+ if (link.link_status == RTE_ETH_LINK_UP) {
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
ngbe_dev_link_update(dev, 0);
/* likely to up */
- if (link.link_status != ETH_LINK_UP)
+ if (link.link_status != RTE_ETH_LINK_UP)
/* handle it 1 sec later, wait it being stable */
timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
/* likely to down */
rte_spinlock_t rss_lock;
uint16_t reta_size;
- struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
- RTE_RETA_GROUP_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+ RTE_ETH_RETA_GROUP_SIZE];
uint8_t rss_key[40]; /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
if (dev == NULL)
return -EINVAL;
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
if (dev == NULL)
return 0;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
rte_spinlock_lock(&internal->rss_lock);
/* Copy RETA table */
- for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
internal->reta_conf[i].mask = reta_conf[i].mask;
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
}
rte_spinlock_lock(&internal->rss_lock);
/* Copy RETA table */
- for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
}
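The per-group indexing used above is the standard way RETA entries are packed into 64-entry groups. A minimal application-side sketch of the same arithmetic, assuming reta_size does not exceed RTE_ETH_RSS_RETA_SIZE_512 and nb_queues is non-zero; spread_reta() is an illustrative name:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Spread the redirection table round-robin over nb_queues Rx queues. */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
                                             RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta, 0, sizeof(reta));
        for (i = 0; i < reta_size; i++) {
                uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

                reta[idx].mask |= UINT64_C(1) << shift;
                reta[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}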
internals->port_id = eth_dev->data->port_id;
rte_eth_random_addr(internals->eth_addr.addr_bytes);
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
- internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+ internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
+ internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;
rte_memcpy(internals->rss_key, default_rss_key, 40);
octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
(eth_dev->data->port_id),
link->link_speed,
- link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
else
octeontx_log_info("Port %d: Link Down",
{
memset(link, 0, sizeof(*link));
- link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
switch (nic->speed) {
case OCTEONTX_LINK_SPEED_SGMII:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case OCTEONTX_LINK_SPEED_XAUI:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case OCTEONTX_LINK_SPEED_RXAUI:
case OCTEONTX_LINK_SPEED_10G_R:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case OCTEONTX_LINK_SPEED_QSGMII:
- link->link_speed = ETH_SPEED_NUM_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case OCTEONTX_LINK_SPEED_40G_R:
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case OCTEONTX_LINK_SPEED_RESERVE1:
case OCTEONTX_LINK_SPEED_RESERVE2:
default:
- link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
octeontx_log_err("incorrect link speed %d", nic->speed);
break;
}
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
- link->link_autoneg = ETH_LINK_AUTONEG;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = RTE_ETH_LINK_AUTONEG;
}
static void
struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
uint16_t flags = 0;
- if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
- if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
- nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
- nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
- if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
- if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= OCCTX_TX_MULTI_SEG_F;
return flags;
struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
uint16_t flags = 0;
- if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM))
+ if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= OCCTX_RX_OFFLOAD_CSUM_F;
- if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= OCCTX_RX_OFFLOAD_CSUM_F;
- if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
flags |= OCCTX_RX_MULTI_SEG_F;
eth_dev->data->scattered_rx = 1;
/* If scatter mode is enabled, TX should also be in multi
* seg mode, else memory leak will occur
*/
- nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
return flags;
return -EINVAL;
}
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
return -EINVAL;
}
- if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+ if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
- txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+ txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
}
- if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
octeontx_log_err("setting link speed/duplex not supported");
return -EINVAL;
}
* when this feature has not been enabled before.
*/
if (data->dev_started && frame_size > buffsz &&
- !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
octeontx_log_err("Scatter mode is disabled");
return -EINVAL;
}
/* Check <seg size> * <max_seg> >= max_frame */
- if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
(frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
return -EINVAL;
/* Setup scatter mode if needed by jumbo */
if (data->mtu > buffsz) {
- nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
}
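The frame-size check above boils down to simple arithmetic on the mbuf data room. A minimal sketch of the same test from the application side, assuming plain Ethernet plus CRC as the L2 overhead (this driver adds more on top); needs_rx_scatter() is an illustrative helper:

#include <stdbool.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Does an MTU-sized frame still fit in one mbuf, or is
 * RTE_ETH_RX_OFFLOAD_SCATTER needed? (sketch only)
 */
static bool
needs_rx_scatter(struct rte_mempool *mp, uint16_t mtu)
{
        uint32_t buf_sz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
        uint32_t frame_sz = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        return frame_sz > buf_sz;
}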
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
/* Autonegotiation may be disabled */
- dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
- dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_40G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_40G;
/* Min/Max MTU supported */
dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
nic->ev_ports = 1;
nic->print_flag = -1;
- data->dev_link.link_status = ETH_LINK_DOWN;
+ data->dev_link.link_status = RTE_ETH_LINK_DOWN;
data->dev_started = 0;
data->promiscuous = 0;
data->all_multicast = 0;
#define OCCTX_MAX_MTU (OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
#define OCTEONTX_RX_OFFLOADS ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
#define OCTEONTX_TX_OFFLOADS ( \
- DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
- DEV_TX_OFFLOAD_MT_LOCKFREE | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | \
+ RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
static inline struct octeontx_nic *
octeontx_pmd_priv(struct rte_eth_dev *dev)
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
rc = octeontx_vlan_hw_filter(nic, true);
if (rc)
goto done;
- nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
} else {
rc = octeontx_vlan_hw_filter(nic, false);
if (rc)
goto done;
- nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ nic->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
}
}
TAILQ_INIT(&nic->vlan_info.fltr_tbl);
- rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ rc = octeontx_dev_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
if (rc)
octeontx_log_err("Failed to set vlan offload rc=%d", rc);
return rc;
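Applications toggle the same VLAN features at runtime through the ethdev offload mask rather than the driver's control bits. A minimal sketch, assuming a configured port; enable_vlan_strip() is an illustrative name:

#include <rte_ethdev.h>

/* Turn VLAN stripping on, leaving the other VLAN offload bits alone. */
static int
enable_vlan_strip(uint16_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
        return rte_eth_dev_set_vlan_offload(port_id, mask);
}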
if (conf.rx_pause && conf.tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (conf.rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (conf.tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
/* low_water & high_water values are in Bytes */
fc_conf->low_water = conf.low_water;
return -EINVAL;
}
- rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_RX_PAUSE);
- tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_TX_PAUSE);
+ rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
conf.high_water = fc_conf->high_water;
conf.low_water = fc_conf->low_water;
if (otx2_dev_is_vf(dev) ||
dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
- capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+ capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return capa;
}
/* TSO not supported for earlier chip revisions */
if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
- capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
return capa;
}
req->npa_func = otx2_npa_pf_func_get();
req->sso_func = otx2_sso_pf_func_get();
req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
- if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
}
aq->rq.sso_ena = 0;
- if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
aq->rq.ipsech_ena = 1;
aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
* These are needed in deriving raw clock value from tsc counter.
* read_clock eth op returns raw clock value.
*/
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
otx2_ethdev_is_ptp_en(dev)) {
rc = otx2_nix_raw_clock_tsc_conv(dev);
if (rc) {
* Maximum three segments can be supported with W8, Choose
* NIX_MAXSQESZ_W16 for multi segment offload.
*/
- if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
return NIX_MAXSQESZ_W16;
else
return NIX_MAXSQESZ_W8;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
- (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
- if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM))
+ if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
- if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
- if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP))
+ if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
if (!dev->ptype_disable)
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
- if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
- conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+ conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
- if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
- if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
- conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
- conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
- conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+ conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
- if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
- if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
- if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
- if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F |
NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
- if (conf & DEV_TX_OFFLOAD_SECURITY)
+ if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
return flags;
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
- dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
- dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* Setting up the rx[tx]_offload_flags due to change
* in rx[tx]_offloads.
goto fail_configure;
}
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
goto fail_configure;
}
- if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
goto fail_configure;
}
if (otx2_dev_is_Ax(dev) &&
- (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
- ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+ ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
otx2_err("Outer IP and SCTP checksum unsupported");
goto fail_configure;
}
* enabled in PF owning this VF
*/
memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
otx2_ethdev_is_ptp_en(dev))
otx2_nix_timesync_enable(eth_dev);
else
rc = otx2_eth_sec_ctx_create(eth_dev);
if (rc)
goto free_mac_addrs;
- dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
- dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+ dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
/* Initialize rte-flow */
rc = otx2_flow_init(dev);
#define CQ_TIMER_THRESH_DEFAULT 0xAULL /* ~1usec i.e (0xA * 100nsec) */
#define CQ_TIMER_THRESH_MAX 255
-#define NIX_RSS_L3_L4_SRC_DST (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
- | ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+#define NIX_RSS_L3_L4_SRC_DST (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY \
+ | RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
-#define NIX_RSS_OFFLOAD (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
- ETH_RSS_TCP | ETH_RSS_SCTP | \
- ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
- NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | \
- ETH_RSS_C_VLAN)
+#define NIX_RSS_OFFLOAD (RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |\
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | \
+ RTE_ETH_RSS_TUNNEL | RTE_ETH_RSS_L2_PAYLOAD | \
+ NIX_RSS_L3_L4_SRC_DST | RTE_ETH_RSS_LEVEL_MASK | \
+ RTE_ETH_RSS_C_VLAN)
#define NIX_TX_OFFLOAD_CAPA ( \
- DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
- DEV_TX_OFFLOAD_MT_LOCKFREE | \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_IPV4_CKSUM)
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | \
+ RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
#define NIX_RX_OFFLOAD_CAPA ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_QINQ_STRIP | \
- DEV_RX_OFFLOAD_TIMESTAMP | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP | \
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define NIX_DEFAULT_RSS_CTX_GROUP 0
#define NIX_DEFAULT_RSS_MCAM_IDX -1
val = atoi(value);
- if (val <= ETH_RSS_RETA_SIZE_64)
- val = ETH_RSS_RETA_SIZE_64;
- else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
- val = ETH_RSS_RETA_SIZE_128;
- else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
- val = ETH_RSS_RETA_SIZE_256;
+ if (val <= RTE_ETH_RSS_RETA_SIZE_64)
+ val = RTE_ETH_RSS_RETA_SIZE_64;
+ else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
+ val = RTE_ETH_RSS_RETA_SIZE_128;
+ else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
+ val = RTE_ETH_RSS_RETA_SIZE_256;
else
val = NIX_RSS_RETA_SIZE;
* when this feature has not been enabled before.
*/
if (data->dev_started && frame_size > buffsz &&
- !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+ !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER))
return -EINVAL;
/* Check <seg size> * <max_seg> >= max_frame */
- if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
(frame_size > buffsz * NIX_RX_NB_SEG_MAX))
return -EINVAL;
};
/* Auto negotiation disabled */
- devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+ devinfo->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
- devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+ devinfo->speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G;
/* 50G and 100G to be supported for board version C0
* and above.
*/
if (!otx2_dev_is_Ax(dev))
- devinfo->speed_capa |= ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G;
+ devinfo->speed_capa |= RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G;
}
devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
!RTE_IS_POWER_OF_2(sa_width));
- if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
- !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
return 0;
if (rte_security_dynfield_register() < 0)
uint16_t port = eth_dev->data->port_id;
char name[RTE_MEMZONE_NAMESIZE];
- if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
- !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
return;
lookup_mem_sa_tbl_clear(eth_dev);
goto err_exit;
}
- if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (hw->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
rc = flow_update_sec_tt(dev, actions);
if (rc != 0) {
rte_flow_error_set(error, EIO,
int rc;
if (otx2_dev_is_lbk(dev)) {
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
goto done;
if (rsp->rx_pause && rsp->tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rsp->rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (rsp->tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
done:
return rc;
if (fc_conf->mode == fc->mode)
return 0;
- rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_RX_PAUSE);
- tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
- (fc_conf->mode == RTE_FC_TX_PAUSE);
+ rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+ (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
/* Check if TX pause frame is already enabled or not */
if (fc->tx_pause ^ tx_pause) {
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
if (otx2_dev_is_Ax(dev) &&
(dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
- (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+ (fc_conf.mode == RTE_ETH_FC_FULL || fc_conf.mode == RTE_ETH_FC_RX_PAUSE)) {
fc_conf.mode =
- (fc_conf.mode == RTE_FC_FULL ||
- fc_conf.mode == RTE_FC_TX_PAUSE) ?
- RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ (fc_conf.mode == RTE_ETH_FC_FULL ||
+ fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ?
+ RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
}
return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
return 0;
memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
- /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ /* Both Rx & Tx flow ctrl get enabled (RTE_ETH_FC_FULL) in HW
* by AF driver, update those info in PMD structure.
*/
rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
goto exit;
fc->mode = fc_conf.mode;
- fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_RX_PAUSE);
- fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
- (fc_conf.mode == RTE_FC_TX_PAUSE);
+ fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
+ (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
exit:
return rc;
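From an application, the renamed RTE_ETH_FC_* modes are driven through the flow-control get/set pair. A minimal sketch, assuming the PMD implements both callbacks; enable_flow_control() keeps the driver's current watermarks and only changes the mode:

#include <string.h>
#include <rte_ethdev.h>

/* Enable both Rx and Tx pause frames on one port (sketch only). */
static int
enable_flow_control(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        memset(&fc_conf, 0, sizeof(fc_conf));
        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;

        fc_conf.mode = RTE_ETH_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}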
attr, "No support of RSS in egress");
}
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "multi-queue mode is disabled");
*FLOW_KEY_ALG index. So, till we update the action with
*flow_key_alg index, set the action to drop.
*/
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
flow->npc_action = NIX_RX_ACTIONOP_DROP;
else
flow->npc_action = NIX_RX_ACTIONOP_UCAST;
otx2_info("Port %d: Link Up - speed %u Mbps - %s",
(int)(eth_dev->data->port_id),
(uint32_t)link->link_speed,
- link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
else
otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
eth_link.link_status = link->link_up;
eth_link.link_speed = link->speed;
- eth_link.link_autoneg = ETH_LINK_AUTONEG;
+ eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
eth_link.link_duplex = link->full_duplex;
otx2_dev->speed = link->speed;
static int
lbk_link_update(struct rte_eth_link *link)
{
- link->link_status = ETH_LINK_UP;
- link->link_speed = ETH_SPEED_NUM_100G;
- link->link_autoneg = ETH_LINK_FIXED;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_status = RTE_ETH_LINK_UP;
+ link->link_speed = RTE_ETH_SPEED_NUM_100G;
+ link->link_autoneg = RTE_ETH_LINK_FIXED;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return 0;
}
link->link_status = rsp->link_info.link_up;
link->link_speed = rsp->link_info.speed;
- link->link_autoneg = ETH_LINK_AUTONEG;
+ link->link_autoneg = RTE_ETH_LINK_AUTONEG;
if (rsp->link_info.full_duplex)
link->link_duplex = rsp->link_info.full_duplex;
/* 50G and 100G to be supported for board version C0 and above */
if (!otx2_dev_is_Ax(dev)) {
- if (link_speeds & ETH_LINK_SPEED_100G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100G)
link_speed = 100000;
- if (link_speeds & ETH_LINK_SPEED_50G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_50G)
link_speed = 50000;
}
- if (link_speeds & ETH_LINK_SPEED_40G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
link_speed = 40000;
- if (link_speeds & ETH_LINK_SPEED_25G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_25G)
link_speed = 25000;
- if (link_speeds & ETH_LINK_SPEED_20G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_20G)
link_speed = 20000;
- if (link_speeds & ETH_LINK_SPEED_10G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
link_speed = 10000;
- if (link_speeds & ETH_LINK_SPEED_5G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_5G)
link_speed = 5000;
- if (link_speeds & ETH_LINK_SPEED_1G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
link_speed = 1000;
return link_speed;
static inline uint8_t
nix_parse_eth_link_duplex(uint32_t link_speeds)
{
- if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
- (link_speeds & ETH_LINK_SPEED_100M_HD))
- return ETH_LINK_HALF_DUPLEX;
+ if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+ (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+ return RTE_ETH_LINK_HALF_DUPLEX;
else
- return ETH_LINK_FULL_DUPLEX;
+ return RTE_ETH_LINK_FULL_DUPLEX;
}
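The link_speeds bits parsed above are set by the application in rte_eth_conf before the port is configured. A minimal sketch, assuming the NIC reports 10G capability in dev_info; request_fixed_10g() is an illustrative helper:

#include <rte_ethdev.h>

/* Pin the port to 10G if the hardware can do it, else keep autoneg. */
static void
request_fixed_10g(struct rte_eth_conf *conf,
                  const struct rte_eth_dev_info *info)
{
        if (info->speed_capa & RTE_ETH_LINK_SPEED_10G)
                conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
                                    RTE_ETH_LINK_SPEED_10G;
        else
                conf->link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;
}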
int
cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
- cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+ cfg.an = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
return cgx_change_mode(dev, &cfg);
}
action = NIX_RX_ACTIONOP_UCAST;
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
action = NIX_RX_ACTIONOP_RSS;
action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
}
/* System time should be already on by default */
nix_start_timecounters(eth_dev);
- dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
return -EINVAL;
- dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+ dev->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
}
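On the application side, the timestamp offload toggled above is requested before the port is started and paired with the timesync calls afterwards. A rough sketch with queue setup and most error handling elided; start_with_timestamps() is an illustrative name:

#include <rte_ethdev.h>

/* Ask for hardware Rx timestamps, then enable the timesync block. */
static int
start_with_timestamps(uint16_t port_id, struct rte_eth_conf *conf,
                      uint16_t nb_rxq, uint16_t nb_txq)
{
        int ret;

        conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
        ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
        if (ret != 0)
                return ret;
        /* ... rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() elided ... */
        ret = rte_eth_dev_start(port_id);
        if (ret != 0)
                return ret;
        return rte_eth_timesync_enable(port_id);
}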
/* Copy RETA table */
- for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
rss->ind_tbl[idx] = reta_conf[i].reta[j];
idx++;
}
/* Copy RETA table */
- for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (i = 0; i < (dev->rss_info.rss_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = rss->ind_tbl[j];
}
}
#define RSS_IPV4_ENABLE ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_SCTP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
#define RSS_IPV6_ENABLE ( \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
#define RSS_IPV6_EX_ENABLE ( \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define RSS_MAX_LEVELS 3
dev->rss_info.nix_rss = ethdev_rss;
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
}
- if (ethdev_rss & ETH_RSS_C_VLAN)
+ if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
- if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
- if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
- if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
- if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+ if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
if (ethdev_rss & RSS_IPV4_ENABLE)
if (ethdev_rss & RSS_IPV6_ENABLE)
flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
- if (ethdev_rss & ETH_RSS_TCP)
+ if (ethdev_rss & RTE_ETH_RSS_TCP)
flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
- if (ethdev_rss & ETH_RSS_UDP)
+ if (ethdev_rss & RTE_ETH_RSS_UDP)
flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
- if (ethdev_rss & ETH_RSS_SCTP)
+ if (ethdev_rss & RTE_ETH_RSS_SCTP)
flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
- if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
if (ethdev_rss & RSS_IPV6_EX_ENABLE)
flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
- if (ethdev_rss & ETH_RSS_PORT)
+ if (ethdev_rss & RTE_ETH_RSS_PORT)
flowkey_cfg |= FLOW_KEY_TYPE_PORT;
- if (ethdev_rss & ETH_RSS_NVGRE)
+ if (ethdev_rss & RTE_ETH_RSS_NVGRE)
flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
- if (ethdev_rss & ETH_RSS_VXLAN)
+ if (ethdev_rss & RTE_ETH_RSS_VXLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
- if (ethdev_rss & ETH_RSS_GENEVE)
+ if (ethdev_rss & RTE_ETH_RSS_GENEVE)
flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
- if (ethdev_rss & ETH_RSS_GTPU)
+ if (ethdev_rss & RTE_ETH_RSS_GTPU)
flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
return flowkey_cfg;
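RTE_ETH_RSS_LEVEL() extracts the encapsulation level the application encoded into rss_hf: 0 is the PMD default, 1 the outermost headers, 2 the innermost. A minimal sketch of requesting inner-header hashing for VXLAN traffic, assuming the PMD advertises the corresponding bits in flow_type_rss_offloads; request_inner_rss() is an illustrative helper:

#include <rte_ethdev.h>

/* Hash VXLAN traffic on its inner IP headers (sketch only). */
static void
request_inner_rss(struct rte_eth_conf *conf)
{
        conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
        conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_VXLAN |
                                            RTE_ETH_RSS_IP |
                                            RTE_ETH_RSS_LEVEL_INNERMOST;
}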
otx2_nix_rss_set_key(dev, rss_conf->rss_key,
(uint32_t)rss_conf->rss_key_len);
- rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
flowkey_cfg =
int rc;
/* Skip further configuration if selected mode is not RSS */
- if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS || !qcnt)
return 0;
/* Update default RSS key and cfg */
}
rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
- rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
/* For PTP enabled, scalar rx function should be chosen as most of the
* PTP apps are implemented to rx burst 1 pkt.
*/
- if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
pick_rx_func(eth_dev, nix_eth_rx_burst);
else
pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
/* Copy multi seg version with no offload for tear down sequence */
else
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
rte_mb();
action = NIX_RX_ACTIONOP_UCAST;
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
action = NIX_RX_ACTIONOP_RSS;
action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
}
* Take offset from LA since in case of untagged packet,
* lbptr is zero.
*/
- if (type == ETH_VLAN_TYPE_OUTER) {
+ if (type == RTE_ETH_VLAN_TYPE_OUTER) {
vtag_action.act.vtag0_def = vtag_index;
vtag_action.act.vtag0_lid = NPC_LID_LA;
vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
if (vlan->strip_on ||
(vlan->qinq_on && !vlan->qinq_before_def)) {
if (eth_dev->data->dev_conf.rxmode.mq_mode ==
- ETH_MQ_RX_RSS)
+ RTE_ETH_MQ_RX_RSS)
vlan->def_rx_mcam_ent.action |=
NIX_RX_ACTIONOP_RSS;
else
rxmode = ð_dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
rc = nix_vlan_hw_strip(eth_dev, true);
} else {
- offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
rc = nix_vlan_hw_strip(eth_dev, false);
}
if (rc)
goto done;
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
- offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
rc = nix_vlan_hw_filter(eth_dev, true, 0);
} else {
- offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
rc = nix_vlan_hw_filter(eth_dev, false, 0);
}
if (rc)
goto done;
}
- if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
if (!dev->vlan_info.qinq_on) {
- offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
rc = otx2_nix_config_double_vlan(eth_dev, true);
if (rc)
goto done;
}
} else {
if (dev->vlan_info.qinq_on) {
- offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
rc = otx2_nix_config_double_vlan(eth_dev, false);
if (rc)
goto done;
}
}
- if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP)) {
+ if (offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) {
dev->rx_offloads |= offloads;
dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
otx2_eth_set_rx_function(eth_dev);
tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
tpid_cfg->tpid = tpid;
- if (type == ETH_VLAN_TYPE_OUTER)
+ if (type == RTE_ETH_VLAN_TYPE_OUTER)
tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
else
tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
if (rc)
return rc;
- if (type == ETH_VLAN_TYPE_OUTER)
+ if (type == RTE_ETH_VLAN_TYPE_OUTER)
dev->vlan_info.outer_vlan_tpid = tpid;
else
dev->vlan_info.inner_vlan_tpid = tpid;
vlan->outer_vlan_idx = 0;
}
- rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+ rc = nix_vlan_handle_default_tx_entry(dev, RTE_ETH_VLAN_TYPE_OUTER,
vtag_index, on);
if (rc < 0) {
printf("Default tx entry failed with rc %d\n", rc);
} else {
/* Reinstall all mcam entries now if filter offload is set */
if (eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
nix_vlan_reinstall_vlan_filters(eth_dev);
}
mask =
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
rc = otx2_nix_vlan_offload_set(eth_dev, mask);
if (rc) {
otx2_err("Failed to set vlan offload rc=%d", rc);
otx_epvf = OTX_EP_DEV(eth_dev);
- devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
devinfo->max_rx_queues = otx_epvf->max_rx_queues;
devinfo->max_tx_queues = otx_epvf->max_tx_queues;
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
- devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
- devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+ devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+ devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
struct otx_ep_buf_free_info *finfo;
int j, frags, num_sg;
- if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+ if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
goto xmit_fail;
finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
struct otx_ep_buf_free_info *finfo;
int j, frags, num_sg;
- if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+ if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
goto xmit_fail;
finfo = (struct otx_ep_buf_free_info *)
droq_pkt->l4_len = hdr_lens.l4_len;
if (droq_pkt->nb_segs > 1 &&
- !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
rte_pktmbuf_free(droq_pkt);
goto oq_read_fail;
}
};
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
for (i = 0; i < dev->data->nb_tx_queues; i++)
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++)
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
static struct pfe *g_pfe;
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
/* TODO: make pfe_svr a runtime option.
* Driver should be able to get the SVR
}
link.link_status = lstatus;
- link.link_speed = ETH_LINK_SPEED_1G;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_speed = RTE_ETH_LINK_SPEED_1G;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
pfe_eth_atomic_write_link_status(dev, &link);
struct eth_phy_cfg {
/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
u32 speed;
-#define ETH_SPEED_AUTONEG 0
-#define ETH_SPEED_SMARTLINQ 0x8 /* deprecated - use link_modes field instead */
+#define RTE_ETH_SPEED_AUTONEG 0
+#define RTE_ETH_SPEED_SMARTLINQ 0x8 /* deprecated - use link_modes field instead */
u32 pause; /* bitmask */
#define ETH_PAUSE_NONE 0x0
}
use_tx_offload = !!(tx_offloads &
- (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
- DEV_TX_OFFLOAD_TCP_TSO | /* tso */
- DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+ (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
if (use_tx_offload) {
DP_INFO(edev, "Assigning qede_xmit_pkts\n");
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
(void)qede_vlan_stripping(eth_dev, 1);
else
(void)qede_vlan_stripping(eth_dev, 0);
}
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* VLAN filtering kicks in when a VLAN is added */
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
* enabled
*/
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
qede_vlan_filter_set(eth_dev, 0, 0);
}
/* Configure default RETA */
memset(reta_conf, 0, sizeof(reta_conf));
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
- reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+ reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- id = i / RTE_RETA_GROUP_SIZE;
- pos = i % RTE_RETA_GROUP_SIZE;
+ id = i / RTE_ETH_RETA_GROUP_SIZE;
+ pos = i % RTE_ETH_RETA_GROUP_SIZE;
q = i % QEDE_RSS_COUNT(eth_dev);
reta_conf[id].reta[pos] = q;
}
}
/* Configure TPA parameters */
- if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
if (qede_enable_tpa(eth_dev, true))
return -EINVAL;
/* Enable scatter mode for LRO */
if (!eth_dev->data->scattered_rx)
- rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
}
/* Start queues */
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
PMD_INIT_FUNC_TRACE(edev);
- if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
- rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* We need to have min 1 RX queue.There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
DP_NOTICE(edev, false,
"Invalid devargs supplied, requested change will not take effect\n");
- if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
- rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+ if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+ rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
DP_ERR(edev, "Unsupported multi-queue mode\n");
return -ENOTSUP;
}
return -ENOMEM;
}
- if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
if (qede_start_vport(qdev, eth_dev->data->mtu))
qdev->mtu = eth_dev->data->mtu;
/* Enable VLAN offloads by default */
- ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK);
+ ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_RSS_HASH);
+ dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
dev_info->rx_queue_offload_capa = 0;
/* TX offloads are on a per-packet basis, so it is applicable
* to both at port and queue levels.
*/
- dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
dev_info->default_txconf = (struct rte_eth_txconf) {
- .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+ .offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
};
dev_info->default_rxconf = (struct rte_eth_rxconf) {
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- speed_cap |= ETH_LINK_SPEED_1G;
+ speed_cap |= RTE_ETH_LINK_SPEED_1G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- speed_cap |= ETH_LINK_SPEED_10G;
+ speed_cap |= RTE_ETH_LINK_SPEED_10G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
- speed_cap |= ETH_LINK_SPEED_25G;
+ speed_cap |= RTE_ETH_LINK_SPEED_25G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- speed_cap |= ETH_LINK_SPEED_40G;
+ speed_cap |= RTE_ETH_LINK_SPEED_40G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- speed_cap |= ETH_LINK_SPEED_50G;
+ speed_cap |= RTE_ETH_LINK_SPEED_50G;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- speed_cap |= ETH_LINK_SPEED_100G;
+ speed_cap |= RTE_ETH_LINK_SPEED_100G;
dev_info->speed_capa = speed_cap;
return 0;
/* Link Mode */
switch (q_link.duplex) {
case QEDE_DUPLEX_HALF:
- link_duplex = ETH_LINK_HALF_DUPLEX;
+ link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case QEDE_DUPLEX_FULL:
- link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case QEDE_DUPLEX_UNKNOWN:
default:
link.link_duplex = link_duplex;
/* Link Status */
- link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
/* AN */
link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
- ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+ RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
link.link_speed, link.link_duplex,
}
/* Pause is assumed to be supported (SUPPORTED_Pause) */
- if (fc_conf->mode == RTE_FC_FULL)
+ if (fc_conf->mode == RTE_ETH_FC_FULL)
params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
QED_LINK_PAUSE_RX_ENABLE);
- if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
- if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
params.link_up = true;
if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
QED_LINK_PAUSE_TX_ENABLE))
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
*rss_caps = 0;
- *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
- *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
- *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
- *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
- *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
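For context, a minimal application-side sketch of the renamed RSS flags in use (illustrative only, not part of the patch; port_id, error handling and <rte_ethdev.h> are assumed):

	/* Update the active RSS hash fields using the RTE_ETH_RSS_* names;
	 * rss_key = NULL leaves the current hash key unchanged. */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,
		.rss_hf  = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			   RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	};
	if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
		printf("port %u: RSS hash update failed\n", port_id);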
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
uint8_t entry;
int rc = 0;
- if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported by hardware\n",
reta_size);
return -EINVAL;
for_each_hwfn(edev, i) {
for (j = 0; j < reta_size; j++) {
- idx = j / RTE_RETA_GROUP_SIZE;
- shift = j % RTE_RETA_GROUP_SIZE;
+ idx = j / RTE_ETH_RETA_GROUP_SIZE;
+ shift = j % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
entry = reta_conf[idx].reta[shift];
fid = entry * edev->num_hwfns + i;
uint16_t i, idx, shift;
uint8_t entry;
- if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported\n",
reta_size);
return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
entry = qdev->rss_ind_table[i];
reta_conf[idx].reta[shift] = entry;
adapter->ipgre.num_filters = 0;
if (is_vf) {
adapter->vxlan.enable = true;
- adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
- ETH_TUNNEL_FILTER_IVLAN;
+ adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+ RTE_ETH_TUNNEL_FILTER_IVLAN;
adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
adapter->geneve.enable = true;
- adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
- ETH_TUNNEL_FILTER_IVLAN;
+ adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+ RTE_ETH_TUNNEL_FILTER_IVLAN;
adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
adapter->ipgre.enable = true;
- adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
- ETH_TUNNEL_FILTER_IVLAN;
+ adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+ RTE_ETH_TUNNEL_FILTER_IVLAN;
} else {
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
const char *string;
} qede_tunn_types[] = {
{
- ETH_TUNNEL_FILTER_OMAC,
+ RTE_ETH_TUNNEL_FILTER_OMAC,
ECORE_FILTER_MAC,
ECORE_TUNN_CLSS_MAC_VLAN,
"outer-mac"
},
{
- ETH_TUNNEL_FILTER_TENID,
+ RTE_ETH_TUNNEL_FILTER_TENID,
ECORE_FILTER_VNI,
ECORE_TUNN_CLSS_MAC_VNI,
"vni"
},
{
- ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_IMAC,
ECORE_FILTER_INNER_MAC,
ECORE_TUNN_CLSS_INNER_MAC_VLAN,
"inner-mac"
},
{
- ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IVLAN,
ECORE_FILTER_INNER_VLAN,
ECORE_TUNN_CLSS_INNER_MAC_VLAN,
"inner-vlan"
},
{
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
ECORE_FILTER_MAC_VNI_PAIR,
ECORE_TUNN_CLSS_MAC_VNI,
"outer-mac and vni"
},
{
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"outer-mac and inner-mac"
},
{
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"outer-mac and inner-vlan"
},
{
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
ECORE_FILTER_INNER_MAC_VNI_PAIR,
ECORE_TUNN_CLSS_INNER_MAC_VNI,
"vni and inner-mac",
},
{
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"vni and inner-vlan",
},
{
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
ECORE_FILTER_INNER_PAIR,
ECORE_TUNN_CLSS_INNER_MAC_VLAN,
"inner-mac and inner-vlan",
},
{
- ETH_TUNNEL_FILTER_OIP,
+ RTE_ETH_TUNNEL_FILTER_OIP,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"outer-IP"
},
{
- ETH_TUNNEL_FILTER_IIP,
+ RTE_ETH_TUNNEL_FILTER_IIP,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"inner-IP"
},
{
- RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"IMAC_IVLAN"
},
{
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"IMAC_IVLAN_TENID"
},
{
- RTE_TUNNEL_FILTER_IMAC_TENID,
+ RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"IMAC_TENID"
},
{
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
ECORE_FILTER_UNUSED,
MAX_ECORE_TUNN_CLSS,
"OMAC_TENID_IMAC"
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+ struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
/* check FDIR modes */
switch (fdir->mode) {
memset(&tunn, 0, sizeof(tunn));
switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
tunnel_udp->udp_port);
ECORE_TUNN_CLSS_MAC_VLAN, false);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
tunnel_udp->udp_port);
memset(&tunn, 0, sizeof(tunn));
switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
"UDP port %u for VXLAN was already configured\n",
qdev->vxlan.udp_port = udp_port;
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
"UDP port %u for GENEVE was already configured\n",
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
/* cache align the mbuf size to simplfy rx_buf_size calculation */
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
- if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
(max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
DP_INFO(edev, "Forcing scatter-gather mode\n");
#define QEDE_MAX_ETHER_HDR_LEN (RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_ETH_MAX_LEN (RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
-#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
- ETH_RSS_NONFRAG_IPV4_TCP |\
- ETH_RSS_NONFRAG_IPV4_UDP |\
- ETH_RSS_IPV6 |\
- ETH_RSS_NONFRAG_IPV6_TCP |\
- ETH_RSS_NONFRAG_IPV6_UDP |\
- ETH_RSS_VXLAN |\
- ETH_RSS_GENEVE)
+#define QEDE_RSS_OFFLOAD_ALL (RTE_ETH_RSS_IPV4 |\
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |\
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP |\
+ RTE_ETH_RSS_IPV6 |\
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |\
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP |\
+ RTE_ETH_RSS_VXLAN |\
+ RTE_ETH_RSS_GENEVE)
#define QEDE_RXTX_MAX(qdev) \
(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
};
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
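As a small usage sketch of the renamed link constants (illustrative only; port_id is assumed and <rte_ethdev.h> is included):

	/* Poll the port's link state and report it with the RTE_ETH_LINK_*
	 * values introduced by this rename. */
	struct rte_eth_link link;
	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
	    link.link_status == RTE_ETH_LINK_UP)
		printf("port %u is up at %u Mbps (%s duplex)\n", port_id,
		       link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");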
RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
eth_dev_stop(struct rte_eth_dev *dev)
{
dev->data->dev_started = 0;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
{
uint32_t phy_caps = 0;
- if (~speeds & ETH_LINK_SPEED_FIXED) {
+ if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
phy_caps |= (1 << EFX_PHY_CAP_AN);
/*
* If no speeds are specified in the mask, any supported
* may be negotiated
*/
- if (speeds == ETH_LINK_SPEED_AUTONEG)
+ if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
phy_caps |=
(1 << EFX_PHY_CAP_1000FDX) |
(1 << EFX_PHY_CAP_10000FDX) |
(1 << EFX_PHY_CAP_50000FDX) |
(1 << EFX_PHY_CAP_100000FDX);
}
- if (speeds & ETH_LINK_SPEED_1G)
+ if (speeds & RTE_ETH_LINK_SPEED_1G)
phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
- if (speeds & ETH_LINK_SPEED_10G)
+ if (speeds & RTE_ETH_LINK_SPEED_10G)
phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
- if (speeds & ETH_LINK_SPEED_25G)
+ if (speeds & RTE_ETH_LINK_SPEED_25G)
phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
- if (speeds & ETH_LINK_SPEED_40G)
+ if (speeds & RTE_ETH_LINK_SPEED_40G)
phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
- if (speeds & ETH_LINK_SPEED_50G)
+ if (speeds & RTE_ETH_LINK_SPEED_50G)
phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
- if (speeds & ETH_LINK_SPEED_100G)
+ if (speeds & RTE_ETH_LINK_SPEED_100G)
phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
return phy_caps;
tx_offloads |= txq_info->offloads;
}
- if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
else
req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
sa->priv.shared->tunnel_encaps =
encp->enc_tunnel_encapsulations_supported;
- if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
encp->enc_tso_v3_enabled;
if (!sa->tso)
if (sa->tso &&
(sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
- (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
+ (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
encp->enc_tso_v3_enabled;
if (!sa->tso_encap)
SFC_DP_RX_FEAT_INTR |
SFC_DP_RX_FEAT_STATS,
.dev_offload_capa = 0,
- .queue_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_RSS_HASH,
+ .queue_offload_capa = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH,
.get_dev_info = sfc_ef100_rx_get_dev_info,
.qsize_up_rings = sfc_ef100_rx_qsize_up_rings,
.qcreate = sfc_ef100_rx_qcreate,
.features = SFC_DP_TX_FEAT_MULTI_PROCESS |
SFC_DP_TX_FEAT_STATS,
.dev_offload_capa = 0,
- .queue_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+ .queue_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
.get_dev_info = sfc_ef100_get_dev_info,
.qsize_up_rings = sfc_ef100_tx_qsize_up_rings,
.qcreate = sfc_ef100_tx_qcreate,
},
.features = SFC_DP_RX_FEAT_FLOW_FLAG |
SFC_DP_RX_FEAT_FLOW_MARK,
- .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_RSS_HASH,
+ .dev_offload_capa = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH,
.queue_offload_capa = 0,
.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
},
.features = SFC_DP_RX_FEAT_MULTI_PROCESS |
SFC_DP_RX_FEAT_INTR,
- .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH,
- .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
+ .dev_offload_capa = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH,
+ .queue_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER,
.get_dev_info = sfc_ef10_rx_get_dev_info,
.qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.qcreate = sfc_ef10_rx_qcreate,
if (txq->sw_ring == NULL)
goto fail_sw_ring_alloc;
- if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+ if (info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) {
txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
info->txq_entries,
SFC_TSOH_STD_LEN,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
- .dev_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS,
- .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
+ .dev_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+ .queue_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
.get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.type = SFC_DP_TX,
},
.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
- .dev_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
- .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
+ .dev_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
+ .queue_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM,
.get_dev_info = sfc_ef10_get_dev_info,
.qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
dev_info->max_vfs = sa->sriov.num_vfs;
/* Autonegotiation may be disabled */
- dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
- dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
- dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
- dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
- dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
- dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
- dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
dev_info->max_rx_queues = sa->rxq_max;
dev_info->max_tx_queues = sa->txq_max;
dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
dev_info->tx_queue_offload_capa;
- if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+ txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->default_txconf.offloads |= txq_offloads_def;
switch (link_fc) {
case 0:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
break;
case EFX_FCNTL_RESPOND:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case EFX_FCNTL_GENERATE:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
default:
sfc_err(sa, "%s: unexpected flow control value %#x",
}
switch (fc_conf->mode) {
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
fcntl = 0;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
fcntl = EFX_FCNTL_RESPOND;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
fcntl = EFX_FCNTL_GENERATE;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
break;
default:
qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
- qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
qinfo->scattered_rx = 1;
}
qinfo->nb_desc = rxq_info->entries;
sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
{
switch (rte_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
return EFX_TUNNEL_PROTOCOL_VXLAN;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
return EFX_TUNNEL_PROTOCOL_GENEVE;
default:
return EFX_TUNNEL_NPROTOS;
/*
* Mapping of hash configuration between RTE and EFX is not one-to-one,
- * hence, conversion is done here to derive a correct set of ETH_RSS
+ * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
* flags which corresponds to the active EFX configuration stored
* locally in 'sfc_adapter' and kept up-to-date
*/
return -EINVAL;
for (entry = 0; entry < reta_size; entry++) {
- int grp = entry / RTE_RETA_GROUP_SIZE;
- int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+ int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+ int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[grp].mask >> grp_idx) & 1)
reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
for (entry = 0; entry < reta_size; entry++) {
- int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+ int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
struct rte_eth_rss_reta_entry64 *grp;
- grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+ grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
if (grp->mask & (1ull << grp_idx)) {
if (grp->reta[grp_idx] >= rss->channels) {
const struct rte_flow_item_vlan *spec = NULL;
const struct rte_flow_item_vlan *mask = NULL;
const struct rte_flow_item_vlan supp_mask = {
- .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
.inner_type = RTE_BE16(0xffff),
};
memset(link_info, 0, sizeof(*link_info));
if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
- link_info->link_status = ETH_LINK_DOWN;
+ link_info->link_status = RTE_ETH_LINK_DOWN;
else
- link_info->link_status = ETH_LINK_UP;
+ link_info->link_status = RTE_ETH_LINK_UP;
switch (link_mode) {
case EFX_LINK_10HDX:
- link_info->link_speed = ETH_SPEED_NUM_10M;
- link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_10M;
+ link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case EFX_LINK_10FDX:
- link_info->link_speed = ETH_SPEED_NUM_10M;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_10M;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_100HDX:
- link_info->link_speed = ETH_SPEED_NUM_100M;
- link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_100M;
+ link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case EFX_LINK_100FDX:
- link_info->link_speed = ETH_SPEED_NUM_100M;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_100M;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_1000HDX:
- link_info->link_speed = ETH_SPEED_NUM_1G;
- link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_1G;
+ link_info->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case EFX_LINK_1000FDX:
- link_info->link_speed = ETH_SPEED_NUM_1G;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_1G;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_10000FDX:
- link_info->link_speed = ETH_SPEED_NUM_10G;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_10G;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_25000FDX:
- link_info->link_speed = ETH_SPEED_NUM_25G;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_25G;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_40000FDX:
- link_info->link_speed = ETH_SPEED_NUM_40G;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_40G;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_50000FDX:
- link_info->link_speed = ETH_SPEED_NUM_50G;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_50G;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_100000FDX:
- link_info->link_speed = ETH_SPEED_NUM_100G;
- link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_100G;
+ link_info->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
default:
SFC_ASSERT(B_FALSE);
/* FALLTHROUGH */
case EFX_LINK_UNKNOWN:
case EFX_LINK_DOWN:
- link_info->link_speed = ETH_SPEED_NUM_NONE;
+ link_info->link_speed = RTE_ETH_SPEED_NUM_NONE;
link_info->link_duplex = 0;
break;
}
- link_info->link_autoneg = ETH_LINK_AUTONEG;
+ link_info->link_autoneg = RTE_ETH_LINK_AUTONEG;
}
int
}
switch (conf->rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
if (nb_rx_queues != 1) {
sfcr_err(sr, "Rx RSS is not supported with %u queues",
nb_rx_queues);
ret = -EINVAL;
}
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
break;
default:
sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
break;
}
- if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
+ if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
sfcr_err(sr, "Tx mode MQ modes not supported");
ret = -EINVAL;
}
sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
} else {
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_UP;
- link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
}
return rte_eth_linkstatus_set(dev, &link);
.hw_fw_caps = SFC_DP_HW_FW_CAP_RX_EFX,
},
.features = SFC_DP_RX_FEAT_INTR,
- .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_RSS_HASH,
- .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
+ .dev_offload_capa = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH,
+ .queue_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
uint64_t no_caps = 0;
if (encp->enc_tunnel_encapsulations_supported == 0)
- no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
return ~no_caps;
}
if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
encp->enc_rx_prefix_size,
- (offloads & DEV_RX_OFFLOAD_SCATTER),
+ (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
encp->enc_rx_scatter_max,
&error)) {
sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
rxq_info->type_flags |=
- (offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
(sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
- if (offloads & DEV_RX_OFFLOAD_RSS_HASH)
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
rxq_info->refill_mb_pool = mb_pool;
if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
- (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
else
rxq_info->rxq_flags = 0;
* Mapping between RTE RSS hash functions and their EFX counterparts.
*/
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
- { ETH_RSS_NONFRAG_IPV4_TCP,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP,
EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
- { ETH_RSS_NONFRAG_IPV4_UDP,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP,
EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
- { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+ { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
EFX_RX_HASH(IPV4, 2TUPLE) },
- { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_IPV6_EX,
+ { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_IPV6_EX,
EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
EFX_RX_HASH(IPV6, 2TUPLE) }
};
int rc = 0;
switch (rxmode->mq_mode) {
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/* No special checks are required */
break;
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
sfc_err(sa, "RSS is not available");
rc = EINVAL;
* so unsupported offloads cannot be added as the result of
* below check.
*/
- if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
- (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+ if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+ (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
- rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
}
- if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
- rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
return rc;
}
configure_rss:
- rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
if (rss->channels > 0) {
uint64_t no_caps = 0;
if (!encp->enc_hw_tx_insert_vlan_enabled)
- no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ no_caps |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
if (!encp->enc_tunnel_encapsulations_supported)
- no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ no_caps |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (!sa->tso)
- no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
+ no_caps |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (!sa->tso_encap ||
(encp->enc_tunnel_encapsulations_supported &
(1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
- no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ no_caps |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
if (!sa->tso_encap ||
(encp->enc_tunnel_encapsulations_supported &
(1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
- no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ no_caps |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
return ~no_caps;
}
}
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
- ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ if (((offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
int rc = 0;
switch (txmode->mq_mode) {
- case ETH_MQ_TX_NONE:
+ case RTE_ETH_MQ_TX_NONE:
break;
default:
sfc_err(sa, "Tx multi-queue mode %u not supported",
if (rc != 0)
goto fail_ev_qstart;
- if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_IPV4;
- if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (txq_info->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_IPV4;
- if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
- (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ if ((txq_info->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+ (txq_info->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
flags |= EFX_TXQ_CKSUM_TCPUDP;
- if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (offloads_supported & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+ if (txq_info->offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
flags |= EFX_TXQ_FATSOV2;
rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
/*
* Here VLAN TCI is expected to be zero in case if no
- * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
+ * RTE_ETH_TX_OFFLOAD_VLAN_INSERT capability is advertised;
* if the calling app ignores the absence of
- * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+ * RTE_ETH_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
* TX_ERROR will occur
*/
pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
.hw_fw_caps = SFC_DP_HW_FW_CAP_TX_EFX,
},
.features = 0,
- .dev_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_MULTI_SEGS,
- .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO,
+ .dev_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+ .queue_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO,
.qsize_up_rings = sfc_efx_tx_qsize_up_rings,
.qcreate = sfc_efx_tx_qcreate,
.qdestroy = sfc_efx_tx_qdestroy,
return status;
/* Link UP */
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
struct pmd_internals *p = dev->data->dev_private;
/* Link DOWN */
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
/* Firmware */
softnic_pipeline_disable_all(p);
/* dev->data */
dev->data->dev_private = dev_private;
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
- dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+ dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
dev->data->mac_addrs = &eth_addr;
dev->data->promiscuous = 1;
dev->data->numa_node = params->cpu_id;
eth_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *data = dev->data;
- if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
+ if (data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
dev->rx_pkt_burst = eth_szedata2_rx_scattered;
data->scattered_rx = 1;
} else {
dev_info->max_rx_queues = internals->max_rx_queues;
dev_info->max_tx_queues = internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
dev_info->tx_offload_capa = 0;
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
- dev_info->speed_capa = ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
return 0;
}
memset(&link, 0, sizeof(link));
- link.link_speed = ETH_SPEED_NUM_100G;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_status = ETH_LINK_UP;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_100G;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
rte_eth_linkstatus_set(dev, &link);
return 0;
#define TAP_IOV_DEFAULT_MAX 1024
-#define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM)
+#define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
-#define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO)
+#define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
static int tap_devices_count;
static volatile uint32_t tap_trigger; /* Rx trigger */
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = RTE_ETH_SPEED_NUM_10G,
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN,
+ .link_autoneg = RTE_ETH_LINK_FIXED,
};
static void
len = readv(process_private->rxq_fds[rxq->queue_id],
*rxq->iovecs,
- 1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+ 1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
rxq->nb_rx_desc : 1));
if (len < (int)sizeof(struct tun_pi))
break;
seg->next = NULL;
mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
RTE_PTYPE_ALL_MASK);
- if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
tap_verify_csum(mbuf);
/* account for the receive frame */
struct pmd_internals *pmd = dev->data->dev_private;
struct ifreq ifr = { .ifr_flags = IFF_UP };
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}
struct pmd_internals *pmd = dev->data->dev_private;
struct ifreq ifr = { .ifr_flags = IFF_UP };
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}
uint32_t speed = pmd_link.link_speed;
uint32_t capa = 0;
- if (speed >= ETH_SPEED_NUM_10M)
- capa |= ETH_LINK_SPEED_10M;
- if (speed >= ETH_SPEED_NUM_100M)
- capa |= ETH_LINK_SPEED_100M;
- if (speed >= ETH_SPEED_NUM_1G)
- capa |= ETH_LINK_SPEED_1G;
- if (speed >= ETH_SPEED_NUM_5G)
- capa |= ETH_LINK_SPEED_2_5G;
- if (speed >= ETH_SPEED_NUM_5G)
- capa |= ETH_LINK_SPEED_5G;
- if (speed >= ETH_SPEED_NUM_10G)
- capa |= ETH_LINK_SPEED_10G;
- if (speed >= ETH_SPEED_NUM_20G)
- capa |= ETH_LINK_SPEED_20G;
- if (speed >= ETH_SPEED_NUM_25G)
- capa |= ETH_LINK_SPEED_25G;
- if (speed >= ETH_SPEED_NUM_40G)
- capa |= ETH_LINK_SPEED_40G;
- if (speed >= ETH_SPEED_NUM_50G)
- capa |= ETH_LINK_SPEED_50G;
- if (speed >= ETH_SPEED_NUM_56G)
- capa |= ETH_LINK_SPEED_56G;
- if (speed >= ETH_SPEED_NUM_100G)
- capa |= ETH_LINK_SPEED_100G;
+ if (speed >= RTE_ETH_SPEED_NUM_10M)
+ capa |= RTE_ETH_LINK_SPEED_10M;
+ if (speed >= RTE_ETH_SPEED_NUM_100M)
+ capa |= RTE_ETH_LINK_SPEED_100M;
+ if (speed >= RTE_ETH_SPEED_NUM_1G)
+ capa |= RTE_ETH_LINK_SPEED_1G;
+ if (speed >= RTE_ETH_SPEED_NUM_5G)
+ capa |= RTE_ETH_LINK_SPEED_2_5G;
+ if (speed >= RTE_ETH_SPEED_NUM_5G)
+ capa |= RTE_ETH_LINK_SPEED_5G;
+ if (speed >= RTE_ETH_SPEED_NUM_10G)
+ capa |= RTE_ETH_LINK_SPEED_10G;
+ if (speed >= RTE_ETH_SPEED_NUM_20G)
+ capa |= RTE_ETH_LINK_SPEED_20G;
+ if (speed >= RTE_ETH_SPEED_NUM_25G)
+ capa |= RTE_ETH_LINK_SPEED_25G;
+ if (speed >= RTE_ETH_SPEED_NUM_40G)
+ capa |= RTE_ETH_LINK_SPEED_40G;
+ if (speed >= RTE_ETH_SPEED_NUM_50G)
+ capa |= RTE_ETH_LINK_SPEED_50G;
+ if (speed >= RTE_ETH_SPEED_NUM_56G)
+ capa |= RTE_ETH_LINK_SPEED_56G;
+ if (speed >= RTE_ETH_SPEED_NUM_100G)
+ capa |= RTE_ETH_LINK_SPEED_100G;
return capa;
}
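The function above derives a cumulative capability mask from a single configured speed; a hedged application-side counterpart (illustrative only, port_id assumed) reads it back via dev_info:

	/* Check the advertised speed capability bits with the renamed macros. */
	struct rte_eth_dev_info dev_info;
	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
	    (dev_info.speed_capa & RTE_ETH_LINK_SPEED_10G) != 0)
		printf("port %u advertises 10G capability\n", port_id);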
tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
if (!(ifr.ifr_flags & IFF_UP) ||
!(ifr.ifr_flags & IFF_RUNNING)) {
- dev_link->link_status = ETH_LINK_DOWN;
+ dev_link->link_status = RTE_ETH_LINK_DOWN;
return 0;
}
}
tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
dev_link->link_status =
((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
- ETH_LINK_UP :
- ETH_LINK_DOWN);
+ RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN);
return 0;
}
int ret;
/* initialize GSO context */
- gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+ gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (!pmd->gso_ctx_mp) {
/*
* Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
txq->csum = !!(offloads &
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM));
+ (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_fc_conf *fc_conf)
{
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_fc_conf *fc_conf)
{
- if (fc_conf->mode != RTE_FC_NONE)
+ if (fc_conf->mode != RTE_ETH_FC_NONE)
return -ENOTSUP;
return 0;
}
}
}
}
- pmd_link.link_speed = ETH_SPEED_NUM_10G;
+ pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
return 0;
}
- speed = ETH_SPEED_NUM_10G;
+ speed = RTE_ETH_SPEED_NUM_10G;
/* use tap%d which causes kernel to choose next available */
strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
#define TAP_RSS_HASH_KEY_SIZE 40
/* Supported RSS */
-#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define TAP_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP))
/* hashed fields for RSS */
enum hash_field {
{
memset(link, 0, sizeof(*link));
- link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
if (nic->duplex == NICVF_HALF_DUPLEX)
- link->link_duplex = ETH_LINK_HALF_DUPLEX;
+ link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
else if (nic->duplex == NICVF_FULL_DUPLEX)
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_speed = nic->speed;
- link->link_autoneg = ETH_LINK_AUTONEG;
+ link->link_autoneg = RTE_ETH_LINK_AUTONEG;
}
static void
/* rte_eth_link_get() might need to wait up to 9 seconds */
for (i = 0; i < MAX_CHECK_TIME; i++) {
nicvf_link_status_update(nic, &link);
- if (link.link_status == ETH_LINK_UP)
+ if (link.link_status == RTE_ETH_LINK_UP)
break;
rte_delay_ms(CHECK_INTERVAL);
}
{
uint64_t nic_rss = 0;
- if (ethdev_rss & ETH_RSS_IPV4)
+ if (ethdev_rss & RTE_ETH_RSS_IPV4)
nic_rss |= RSS_IP_ENA;
- if (ethdev_rss & ETH_RSS_IPV6)
+ if (ethdev_rss & RTE_ETH_RSS_IPV6)
nic_rss |= RSS_IP_ENA;
- if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
- if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
- if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
- if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
- if (ethdev_rss & ETH_RSS_PORT)
+ if (ethdev_rss & RTE_ETH_RSS_PORT)
nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
- if (ethdev_rss & ETH_RSS_VXLAN)
+ if (ethdev_rss & RTE_ETH_RSS_VXLAN)
nic_rss |= RSS_TUN_VXLAN_ENA;
- if (ethdev_rss & ETH_RSS_GENEVE)
+ if (ethdev_rss & RTE_ETH_RSS_GENEVE)
nic_rss |= RSS_TUN_GENEVE_ENA;
- if (ethdev_rss & ETH_RSS_NVGRE)
+ if (ethdev_rss & RTE_ETH_RSS_NVGRE)
nic_rss |= RSS_TUN_NVGRE_ENA;
}
uint64_t ethdev_rss = 0;
if (nic_rss & RSS_IP_ENA)
- ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+ ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);
if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
- ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP);
+ ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP);
if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
- ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP);
+ ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP);
if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
- ethdev_rss |= ETH_RSS_PORT;
+ ethdev_rss |= RTE_ETH_RSS_PORT;
if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
if (nic_rss & RSS_TUN_VXLAN_ENA)
- ethdev_rss |= ETH_RSS_VXLAN;
+ ethdev_rss |= RTE_ETH_RSS_VXLAN;
if (nic_rss & RSS_TUN_GENEVE_ENA)
- ethdev_rss |= ETH_RSS_GENEVE;
+ ethdev_rss |= RTE_ETH_RSS_GENEVE;
if (nic_rss & RSS_TUN_NVGRE_ENA)
- ethdev_rss |= ETH_RSS_NVGRE;
+ ethdev_rss |= RTE_ETH_RSS_NVGRE;
}
return ethdev_rss;
}
return ret;
/* Copy RETA table */
- for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = tbl[j];
}
return ret;
/* Copy RETA table */
- for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
- for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
tbl[j] = reta_conf[i].reta[j];
}
dev->data->nb_rx_queues,
dev->data->dev_conf.lpbk_mode, rsshf);
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
ret = nicvf_rss_term(nic);
- else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
if (ret)
PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
multiseg = true;
break;
}
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
txq->offloads = offloads;
- is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
PMD_INIT_FUNC_TRACE();
/* Autonegotiation may be disabled */
- dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
- dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
- dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
- .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM,
+ .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
};
return 0;
nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
/* Configure VLAN Strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = nicvf_vlan_offload_config(dev, mask);
/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
/* Setup scatter mode if needed by jumbo */
if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
dev->data->scattered_rx = 1;
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
+ if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
dev->data->scattered_rx = 1;
/* Setup MTU */
PMD_INIT_FUNC_TRACE();
- if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
- rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (!rte_eal_has_hugepages()) {
PMD_INIT_LOG(INFO, "Huge page is not configured");
return -EINVAL;
}
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
return -EINVAL;
}
return -EINVAL;
}
- if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
return -EINVAL;
}
}
}
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
nic->offload_cksum = 1;
PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
struct rte_eth_rxmode *rxmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
nicvf_vlan_hw_strip(nic, true);
else
nicvf_vlan_hw_strip(nic, false);
#define NICVF_UNKNOWN_DUPLEX 0xff
#define NICVF_RSS_OFFLOAD_PASS1 ( \
- ETH_RSS_PORT | \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+ RTE_ETH_RSS_PORT | \
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define NICVF_RSS_OFFLOAD_TUNNEL ( \
- ETH_RSS_VXLAN | \
- ETH_RSS_GENEVE | \
- ETH_RSS_NVGRE)
+ RTE_ETH_RSS_VXLAN | \
+ RTE_ETH_RSS_GENEVE | \
+ RTE_ETH_RSS_NVGRE)
#define NICVF_TX_OFFLOAD_CAPA ( \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define NICVF_RX_OFFLOAD_CAPA ( \
- DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define NICVF_DEFAULT_RX_FREE_THRESH 224
#define NICVF_DEFAULT_TX_FREE_THRESH 224
rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
restart = (rxcfg & TXGBE_RXCFG_ENA) &&
!(rxcfg & TXGBE_RXCFG_VLAN);
rxcfg |= TXGBE_RXCFG_VLAN;
vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
+ case RTE_ETH_VLAN_TYPE_INNER:
if (vlan_ext) {
wr32m(hw, TXGBE_VLANCTL,
TXGBE_VLANCTL_TPID_MASK,
TXGBE_TAGTPID_LSB(tpid));
}
break;
- case ETH_VLAN_TYPE_OUTER:
+ case RTE_ETH_VLAN_TYPE_OUTER:
if (vlan_ext) {
/* Only the high 16-bits is valid */
wr32m(hw, TXGBE_EXTAG,
if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
rxq->vlan_flags = PKT_RX_VLAN;
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
txgbe_vlan_strip_queue_set(dev, i, 1);
else
txgbe_vlan_strip_queue_set(dev, i, 0);
struct rte_eth_rxmode *rxmode;
struct txgbe_rx_queue *rxq;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
rxmode = &dev->data->dev_conf.rxmode;
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
else
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
}
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
txgbe_vlan_hw_strip_config(dev);
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
txgbe_vlan_hw_filter_enable(dev);
else
txgbe_vlan_hw_filter_disable(dev);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
txgbe_vlan_hw_extend_enable(dev);
else
txgbe_vlan_hw_extend_disable(dev);
}
- if (mask & ETH_QINQ_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+ if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
txgbe_qinq_hw_strip_enable(dev);
else
txgbe_qinq_hw_strip_disable(dev);
switch (nb_rx_q) {
case 1:
case 2:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
break;
case 4:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
break;
default:
return -EINVAL;
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
break;
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
" unsupported mq_mode rx %d.",
dev_conf->rxmode.mq_mode);
return -EINVAL;
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
return -EINVAL;
}
break;
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_NONE:
/* if nothing mq mode configure, use default scheme */
dev->data->dev_conf.rxmode.mq_mode =
- ETH_MQ_RX_VMDQ_ONLY;
+ RTE_ETH_MQ_RX_VMDQ_ONLY;
break;
- default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(ERR, "SRIOV is active,"
" wrong mq_mode rx %d.",
}
switch (dev_conf->txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
break;
- default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
dev->data->dev_conf.txmode.mq_mode =
- ETH_MQ_TX_VMDQ_ONLY;
+ RTE_ETH_MQ_TX_VMDQ_ONLY;
break;
}
return -EINVAL;
}
} else {
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
" not supported.");
return -EINVAL;
}
/* check configuration for vmdb+dcb mode */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
return -EINVAL;
}
conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools must be %d or %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
return -EINVAL;
}
conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools != %d and"
" nb_queue_pools != %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
/* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multiple queue mode checking */
ret = txgbe_check_mq_mode(dev);
goto error;
}
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = txgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
}
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
/* Enable vlan filtering for VMDq */
txgbe_vmdq_vlan_hw_filter_enable(dev);
}
if (err)
goto error;
- allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G;
link_speeds = &dev->data->dev_conf.link_speeds;
if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
}
speed = 0x0;
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
speed = (TXGBE_LINK_SPEED_100M_FULL |
TXGBE_LINK_SPEED_1GB_FULL |
TXGBE_LINK_SPEED_10GB_FULL);
} else {
- if (*link_speeds & ETH_LINK_SPEED_10G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
speed |= TXGBE_LINK_SPEED_10GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
speed |= TXGBE_LINK_SPEED_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_1G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
speed |= TXGBE_LINK_SPEED_1GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_100M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
speed |= TXGBE_LINK_SPEED_100M_FULL;
}
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
dev_info->tx_desc_lim = tx_desc_lim;
dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
- dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
/* Driver-preferred Rx/Tx parameters */
dev_info->default_rxportconf.burst_size = 32;
int wait = 1;
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_AUTONEG);
hw->mac.get_link_status = true;
err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
if (err != 0) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return rte_eth_linkstatus_set(dev, &link);
}
}
intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (link_speed) {
default:
case TXGBE_LINK_SPEED_UNKNOWN:
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case TXGBE_LINK_SPEED_100M_FULL:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case TXGBE_LINK_SPEED_1GB_FULL:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case TXGBE_LINK_SPEED_2_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_2_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case TXGBE_LINK_SPEED_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case TXGBE_LINK_SPEED_10GB_FULL:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
}
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
tx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
return -ENOTSUP;
}
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += 4) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
if (!mask)
continue;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += 4) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
if (!mask)
continue;
return -ENOTSUP;
if (on) {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = ~0;
wr32(hw, TXGBE_UCADDRTBL(i), ~0);
}
} else {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = 0;
wr32(hw, TXGBE_UCADDRTBL(i), 0);
}
{
uint32_t new_val = orig_val;
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
new_val |= TXGBE_POOLETHCTL_UTA;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
new_val |= TXGBE_POOLETHCTL_MCHA;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
new_val |= TXGBE_POOLETHCTL_UCHA;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
new_val |= TXGBE_POOLETHCTL_BCA;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
new_val |= TXGBE_POOLETHCTL_MCP;
return new_val;
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
incval = TXGBE_INCVAL_100;
shift = TXGBE_INCVAL_SHIFT_100;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
incval = TXGBE_INCVAL_1GB;
shift = TXGBE_INCVAL_SHIFT_1GB;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
default:
incval = TXGBE_INCVAL_10GB;
shift = TXGBE_INCVAL_SHIFT_10GB;
uint8_t nb_tcs;
uint8_t i, j;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
else
dcb_info->nb_tcs = 1;
if (dcb_config->vt_mode) { /* vt is enabled */
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
for (j = 0; j < nb_tcs; j++) {
} else { /* vt is disabled */
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
- if (dcb_info->nb_tcs == ETH_4_TCS) {
+ if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
- } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
}
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
break;
default:
return ret;
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
break;
default:
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
ret = -EINVAL;
}
wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
ret = -EINVAL;
}
wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
ret = -EINVAL;
}
wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
ret = -EINVAL;
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
}
wr32(hw, TXGBE_VXLANPORT, 0);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
}
wr32(hw, TXGBE_GENEVEPORT, 0);
break;
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
}
wr32(hw, TXGBE_TEREDOPORT, 0);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
#define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
#define TXGBE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
#define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
}
#endif
txgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = txgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
int on = 0;
/* VF function only support hw strip feature, others are not support */
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
txgbevf_vlan_strip_queue_set(dev, i, on);
}
}
* flexbytes matching field, and drop queue (only for perfect matching mode).
*/
static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf,
+configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
uint32_t *fdirctrl, uint32_t *flex)
{
*fdirctrl = 0;
*flex = 0;
switch (conf->pballoc) {
- case RTE_FDIR_PBALLOC_64K:
+ case RTE_ETH_FDIR_PBALLOC_64K:
/* 8k - 1 signature filters */
*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
break;
- case RTE_FDIR_PBALLOC_128K:
+ case RTE_ETH_FDIR_PBALLOC_128K:
/* 16k - 1 signature filters */
*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
break;
- case RTE_FDIR_PBALLOC_256K:
+ case RTE_ETH_FDIR_PBALLOC_256K:
/* 32k - 1 signature filters */
*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
break;
static uint32_t
atr_compute_perfect_hash(struct txgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum rte_eth_fdir_pballoc_type pballoc)
{
uint32_t bucket_hash;
bucket_hash = txgbe_atr_compute_hash(input,
TXGBE_ATR_BUCKET_HASH_KEY);
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
else
bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
*/
static uint32_t
atr_compute_signature_hash(struct txgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum rte_eth_fdir_pballoc_type pballoc)
{
uint32_t bucket_hash, sig_hash;
bucket_hash = txgbe_atr_compute_hash(input,
TXGBE_ATR_BUCKET_HASH_KEY);
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
else
bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
return -rte_errno;
}
- filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
/**
* grp and e_cid_base are bit fields and only use 14 bits.
* e-tag id is taken as little endian by HW.
aead_xform = &conf->crypto_xform->aead;
if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
return -ENOTSUP;
}
} else {
- if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
reg |= TXGBE_SECRXCTL_CRCSTRIP;
wr32(hw, TXGBE_SECRXCTL, reg);
- if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
if (reg != 0) {
return -1;
}
}
- if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) {
wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
reg = rd32(hw, TXGBE_SECTXCTL);
if (reg != TXGBE_SECTXCTL_STFWD) {
memset(uta_info, 0, sizeof(struct txgbe_uta_info));
hw->mac.mc_filter_type = 0;
- if (vf_num >= ETH_32_POOLS) {
+ if (vf_num >= RTE_ETH_32_POOLS) {
nb_queue = 2;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
- } else if (vf_num >= ETH_16_POOLS) {
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+ } else if (vf_num >= RTE_ETH_16_POOLS) {
nb_queue = 4;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
} else {
nb_queue = 8;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
}
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
break;
- case ETH_16_POOLS:
+ case RTE_ETH_16_POOLS:
gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
break;
}
/* Notify VF of number of DCB traffic classes */
eth_conf = &eth_dev->data->dev_conf;
switch (eth_conf->txmode.mq_mode) {
- case ETH_MQ_TX_NONE:
- case ETH_MQ_TX_DCB:
+ case RTE_ETH_MQ_TX_NONE:
+ case RTE_ETH_MQ_TX_DCB:
PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
", but its tx mode = %d\n", vf,
eth_conf->txmode.mq_mode);
return -1;
- case ETH_MQ_TX_VMDQ_DCB:
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
switch (vmdq_dcb_tx_conf->nb_queue_pools) {
- case ETH_16_POOLS:
- num_tcs = ETH_8_TCS;
+ case RTE_ETH_16_POOLS:
+ num_tcs = RTE_ETH_8_TCS;
break;
- case ETH_32_POOLS:
- num_tcs = ETH_4_TCS;
+ case RTE_ETH_32_POOLS:
+ num_tcs = RTE_ETH_4_TCS;
break;
default:
return -1;
}
break;
- /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
- case ETH_MQ_TX_VMDQ_ONLY:
+ /* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+ case RTE_ETH_MQ_TX_VMDQ_ONLY:
hw = TXGBE_DEV_HW(eth_dev);
vmvir = rd32(hw, TXGBE_POOLTAG(vf));
vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
uint64_t
txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
{
- return DEV_RX_OFFLOAD_VLAN_STRIP;
+ return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
uint64_t
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
- offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_SCATTER;
+ offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_SCATTER;
if (!txgbe_is_vf(dev))
- offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_VLAN_EXTEND);
+ offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
/*
* RSC is only supported by PF devices in a non-SR-IOV
* mode.
*/
if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
if (hw->mac.type == txgbe_mac_raptor)
- offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
- offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
- offloads |= DEV_RX_OFFLOAD_SECURITY;
+ offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
#endif
return offloads;
uint64_t tx_offload_capa;
tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_UDP_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO |
- DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS;
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
if (!txgbe_is_vf(dev))
- tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
- tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
- tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
- tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
#endif
return tx_offload_capa;
}
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIB_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_SECURITY);
+ RTE_ETH_TX_OFFLOAD_SECURITY);
#endif
/* Modification to set tail pointer for virtual function
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
if (hw->mac.type == txgbe_mac_raptor_vf) {
mrqc = rd32(hw, TXGBE_VFPLCFG);
mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= TXGBE_VFPLCFG_RSSIPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
- if (rss_hf & ETH_RSS_IPV6 ||
- rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6 ||
+ rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= TXGBE_VFPLCFG_RSSIPV6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
- rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+ rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
- rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+ rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
if (rss_hf)
} else {
mrqc = rd32(hw, TXGBE_RACTL);
mrqc &= ~TXGBE_RACTL_RSSMASK;
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
mrqc |= TXGBE_RACTL_RSSIPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= TXGBE_RACTL_RSSIPV4TCP;
- if (rss_hf & ETH_RSS_IPV6 ||
- rss_hf & ETH_RSS_IPV6_EX)
+ if (rss_hf & RTE_ETH_RSS_IPV6 ||
+ rss_hf & RTE_ETH_RSS_IPV6_EX)
mrqc |= TXGBE_RACTL_RSSIPV6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
- rss_hf & ETH_RSS_IPV6_TCP_EX)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
+ rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
mrqc |= TXGBE_RACTL_RSSIPV6TCP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= TXGBE_RACTL_RSSIPV4UDP;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
- rss_hf & ETH_RSS_IPV6_UDP_EX)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
+ rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
mrqc |= TXGBE_RACTL_RSSIPV6UDP;
if (rss_hf)
if (hw->mac.type == txgbe_mac_raptor_vf) {
mrqc = rd32(hw, TXGBE_VFPLCFG);
if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
- rss_hf |= ETH_RSS_IPV6 |
- ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX;
if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
rss_hf = 0;
} else {
mrqc = rd32(hw, TXGBE_RACTL);
if (mrqc & TXGBE_RACTL_RSSIPV4)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= RTE_ETH_RSS_IPV4;
if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & TXGBE_RACTL_RSSIPV6)
- rss_hf |= ETH_RSS_IPV6 |
- ETH_RSS_IPV6_EX;
+ rss_hf |= RTE_ETH_RSS_IPV6 |
+ RTE_ETH_RSS_IPV6_EX;
if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX;
if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
if (!(mrqc & TXGBE_RACTL_RSSENA))
rss_hf = 0;
}
*/
if (adapter->rss_reta_updated == 0) {
reta = 0;
- for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+ for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
if (j == dev->data->nb_rx_queues)
j = 0;
reta = (reta >> 8) | LS32(j, 24, 0xFF);
cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
num_pools = cfg->nb_queue_pools;
/* Check we have a valid number of pools */
- if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+ if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
txgbe_rss_disable(dev);
return;
}
/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
- nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+ nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
/*
* split rx buffer up into sections, each for 1 traffic class
wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
}
/* zero alloc all unused TCs */
- for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
rxpbsize &= (~(0x3FF << 10));
wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
}
- if (num_pools == ETH_16_POOLS) {
+ if (num_pools == RTE_ETH_16_POOLS) {
mrqc = TXGBE_PORTCTL_NUMTC_8;
mrqc |= TXGBE_PORTCTL_NUMVT_16;
} else {
wr32(hw, TXGBE_POOLCTL, vt_ctl);
queue_mapping = 0;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
/*
* mapping is done with 3 bits per priority,
* so shift by i*3 each time
wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
wr32(hw, TXGBE_POOLRXENA(0),
- num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+ num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
wr32(hw, TXGBE_ETHADDRIDX, 0);
wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
/*PF VF Transmit Enable*/
wr32(hw, TXGBE_POOLTXENA(0),
vmdq_tx_conf->nb_queue_pools ==
- ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+ RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
txgbe_dcb_tx_hw_config(dev, dcb_config);
uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
- if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
- dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
} else {
- dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
}
/* Initialize User Priority to Traffic Class mapping */
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
- if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
- dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
} else {
- dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
- dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
}
/* Initialize User Priority to Traffic Class mapping */
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
}
/* User Priority to Traffic Class mapping */
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_rx = DCB_RX_CONFIG;
/*
/*Configure general VMDQ and DCB RX parameters*/
txgbe_vmdq_dcb_configure(dev);
break;
- case ETH_MQ_RX_DCB:
- case ETH_MQ_RX_DCB_RSS:
+ case RTE_ETH_MQ_RX_DCB:
+ case RTE_ETH_MQ_RX_DCB_RSS:
dcb_config->vt_mode = false;
config_dcb_rx = DCB_RX_CONFIG;
/* Get dcb TX configuration parameters from rte_eth_conf */
break;
}
switch (dev->data->dev_conf.txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
/* get DCB and VT TX configuration parameters
txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
break;
- case ETH_MQ_TX_DCB:
+ case RTE_ETH_MQ_TX_DCB:
dcb_config->vt_mode = false;
config_dcb_tx = DCB_TX_CONFIG;
/* get DCB TX configuration parameters from rte_eth_conf */
nb_tcs = dcb_config->num_tcs.pfc_tcs;
/* Unpack map */
txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
- if (nb_tcs == ETH_4_TCS) {
+ if (nb_tcs == RTE_ETH_4_TCS) {
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
mask = (uint8_t)(mask & (~(1 << map[i])));
for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
- if ((mask & 0x1) && j < ETH_DCB_NUM_USER_PRIORITIES)
+ if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
map[j++] = i;
mask >>= 1;
}
wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
/* zero alloc all unused TCs */
- for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
wr32(hw, TXGBE_PBRXSIZE(i), 0);
}
if (config_dcb_tx) {
wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
}
/* Clear unused TCs, if any, to zero buffer size*/
- for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
wr32(hw, TXGBE_PBTXSIZE(i), 0);
wr32(hw, TXGBE_PBTXDMATH(i), 0);
}
txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
/* Check if the PFC is supported */
- if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
for (i = 0; i < nb_tcs; i++) {
/* If the TC count is 8,
tc->pfc = txgbe_dcb_pfc_enabled;
}
txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
- if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
pfc_en &= 0x0F;
ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
}
PMD_INIT_FUNC_TRACE();
/* check support mq_mode for DCB */
- if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB &&
- dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB &&
- dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)
+ if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
+ dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
+ dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
return;
- if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+ if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
/* pool enabling for receive - 64 */
wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
- if (num_pools == ETH_64_POOLS)
+ if (num_pools == RTE_ETH_64_POOLS)
wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
/*
mrqc = rd32(hw, TXGBE_PORTCTL);
mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
switch (RTE_ETH_DEV_SRIOV(dev).active) {
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
mrqc |= TXGBE_PORTCTL_NUMVT_64;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
mrqc |= TXGBE_PORTCTL_NUMVT_32;
break;
mrqc = rd32(hw, TXGBE_PORTCTL);
mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
switch (RTE_ETH_DEV_SRIOV(dev).active) {
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
mrqc |= TXGBE_PORTCTL_NUMVT_64;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
mrqc |= TXGBE_PORTCTL_NUMVT_32;
break;
- case ETH_16_POOLS:
+ case RTE_ETH_16_POOLS:
mrqc |= TXGBE_PORTCTL_NUMVT_16;
break;
default:
* any DCB/RSS w/o VMDq multi-queue setting
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_DCB_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
txgbe_rss_configure(dev);
break;
- case ETH_MQ_RX_VMDQ_DCB:
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
txgbe_vmdq_dcb_configure(dev);
break;
- case ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
txgbe_vmdq_rx_hw_configure(dev);
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
default:
/* if mq_mode is none, disable rss mode.*/
txgbe_rss_disable(dev);
* Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
txgbe_config_vf_rss(dev);
break;
- case ETH_MQ_RX_VMDQ_DCB:
- case ETH_MQ_RX_DCB:
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
+ case RTE_ETH_MQ_RX_DCB:
/* In SRIOV, the configuration is the same as VMDq case */
txgbe_vmdq_dcb_configure(dev);
break;
/* DCB/RSS together with SRIOV is not supported */
- case ETH_MQ_RX_VMDQ_DCB_RSS:
- case ETH_MQ_RX_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
* SRIOV inactive scheme
* any DCB w/o VMDq multi-queue setting
*/
- if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+ if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
txgbe_vmdq_tx_hw_configure(hw);
else
wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
* SRIOV active scheme
* FIXME if support DCB together with VMDq & SRIOV
*/
- case ETH_64_POOLS:
+ case RTE_ETH_64_POOLS:
mtqc = TXGBE_PORTCTL_NUMVT_64;
break;
- case ETH_32_POOLS:
+ case RTE_ETH_32_POOLS:
mtqc = TXGBE_PORTCTL_NUMVT_32;
break;
- case ETH_16_POOLS:
+ case RTE_ETH_16_POOLS:
mtqc = TXGBE_PORTCTL_NUMVT_16;
break;
default:
/* Sanity check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
/* RSC global configuration */
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
- (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+ (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
"is disabled");
return -EINVAL;
}
rfctl = rd32(hw, TXGBE_PSRCTL);
- if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
rfctl &= ~TXGBE_PSRCTL_RSCDIA;
else
rfctl |= TXGBE_PSRCTL_RSCDIA;
wr32(hw, TXGBE_PSRCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set PSRCTL.RSCACK bit */
struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SECURITY);
+ RTE_ETH_RX_OFFLOAD_SECURITY);
}
#endif
}
* Configure CRC stripping, if any.
*/
hlreg0 = rd32(hw, TXGBE_SECRXCTL);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
else
hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
* Assume no header split and no VLAN strip support
* on any Rx queue first .
*/
- rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
2 * TXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
*/
rxcsum = rd32(hw, TXGBE_PSRCTL);
rxcsum |= TXGBE_PSRCTL_PCSD;
- if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
rxcsum |= TXGBE_PSRCTL_L4CSUM;
else
rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
if (hw->mac.type == txgbe_mac_raptor) {
rdrxctl = rd32(hw, TXGBE_SECRXCTL);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
else
rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
txgbe_setup_loopback_link_raptor(hw);
#ifdef RTE_LIB_SECURITY
- if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
- (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+ if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
ret = txgbe_crypto_enable_ipsec(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR,
* Assume no header split and no VLAN strip support
* on any Rx queue first .
*/
- rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
/* Set PSR type for VF RSS according to max Rx queue */
psrtype = TXGBE_VFPLCFG_PSRL4HDR |
*/
wr32(hw, TXGBE_RXCFG(i), srrctl);
- if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
(dev->data->mtu + TXGBE_ETH_OVERHEAD +
2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
dev->data->scattered_rx = 1;
}
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
/*
* little-endian order.
*/
reta = 0;
- for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+ for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
if (j == conf->conf.queue_num)
j = 0;
reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
uint8_t rx_deferred_start; /**< not in global dev start. */
/** flags to set in mbuf when a vlan is detected. */
uint64_t vlan_flags;
- uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+ uint64_t offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint64_t offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /* Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
uint8_t nb_tcs = 0;
eth_conf = &dev->data->dev_conf;
- if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
- } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ } else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
- ETH_32_POOLS)
- nb_tcs = ETH_4_TCS;
+ RTE_ETH_32_POOLS)
+ nb_tcs = RTE_ETH_4_TCS;
else
- nb_tcs = ETH_8_TCS;
+ nb_tcs = RTE_ETH_8_TCS;
} else {
nb_tcs = 1;
}
if (vf_num) {
/* no DCB */
if (nb_tcs == 1) {
- if (vf_num >= ETH_32_POOLS) {
+ if (vf_num >= RTE_ETH_32_POOLS) {
*nb = 2;
*base = vf_num * 2;
- } else if (vf_num >= ETH_16_POOLS) {
+ } else if (vf_num >= RTE_ETH_16_POOLS) {
*nb = 4;
*base = vf_num * 4;
} else {
}
} else {
/* VT off */
- if (nb_tcs == ETH_8_TCS) {
+ if (nb_tcs == RTE_ETH_8_TCS) {
switch (tc_node_no) {
case 0:
*base = 0;
static struct rte_eth_link pmd_link = {
.link_speed = 10000,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN
+ .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+ .link_status = RTE_ETH_LINK_DOWN
};
struct rte_vhost_vring_state {
rte_vhost_get_mtu(vid, ð_dev->data->mtu);
- eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
rte_atomic32_set(&internal->dev_attached, 1);
update_queuing_status(eth_dev);
rte_atomic32_set(&internal->dev_attached, 0);
update_queuing_status(eth_dev);
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
if (vhost_driver_setup(dev) < 0)
return -1;
- internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ internal->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
dev_info->max_tx_queues = internal->max_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
return 0;
}
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
- struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+ struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
- if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+ if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
config = &local_config;
virtio_read_dev_config(hw,
}
}
if (hw->duplex == DUPLEX_UNKNOWN)
- hw->duplex = ETH_LINK_FULL_DUPLEX;
+ hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
hw->speed, hw->duplex);
if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+ uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
int vectorized = 0;
int ret;
virtio_dev_speed_capa_get(uint32_t speed)
{
switch (speed) {
- case ETH_SPEED_NUM_10G:
- return ETH_LINK_SPEED_10G;
- case ETH_SPEED_NUM_20G:
- return ETH_LINK_SPEED_20G;
- case ETH_SPEED_NUM_25G:
- return ETH_LINK_SPEED_25G;
- case ETH_SPEED_NUM_40G:
- return ETH_LINK_SPEED_40G;
- case ETH_SPEED_NUM_50G:
- return ETH_LINK_SPEED_50G;
- case ETH_SPEED_NUM_56G:
- return ETH_LINK_SPEED_56G;
- case ETH_SPEED_NUM_100G:
- return ETH_LINK_SPEED_100G;
- case ETH_SPEED_NUM_200G:
- return ETH_LINK_SPEED_200G;
+ case RTE_ETH_SPEED_NUM_10G:
+ return RTE_ETH_LINK_SPEED_10G;
+ case RTE_ETH_SPEED_NUM_20G:
+ return RTE_ETH_LINK_SPEED_20G;
+ case RTE_ETH_SPEED_NUM_25G:
+ return RTE_ETH_LINK_SPEED_25G;
+ case RTE_ETH_SPEED_NUM_40G:
+ return RTE_ETH_LINK_SPEED_40G;
+ case RTE_ETH_SPEED_NUM_50G:
+ return RTE_ETH_LINK_SPEED_50G;
+ case RTE_ETH_SPEED_NUM_56G:
+ return RTE_ETH_LINK_SPEED_56G;
+ case RTE_ETH_SPEED_NUM_100G:
+ return RTE_ETH_LINK_SPEED_100G;
+ case RTE_ETH_SPEED_NUM_200G:
+ return RTE_ETH_LINK_SPEED_200G;
default:
return 0;
}
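As a quick application-side illustration (not part of the patch above), the renamed RTE_ETH_LINK_SPEED_* capability bits are tested the same way as before; the sketch below assumes a valid, hypothetical port_id and only checks the 10G bit.

#include <stdio.h>
#include <rte_ethdev.h>

/* Minimal sketch: report whether a port advertises 10 Gb/s link capability. */
static void
print_10g_capability(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;
	if (dev_info.speed_capa & RTE_ETH_LINK_SPEED_10G)
		printf("port %u supports 10 Gb/s\n", port_id);
}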
PMD_INIT_LOG(DEBUG, "configure");
req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
- if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
PMD_DRV_LOG(ERR,
"Unsupported Rx multi queue mode %d",
rxmode->mq_mode);
return -EINVAL;
}
- if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
PMD_DRV_LOG(ERR,
"Unsupported Tx multi queue mode %d",
txmode->mq_mode);
hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM))
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
- if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM))
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_CSUM);
- if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
req_features |=
(1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
return ret;
}
- if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+ if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(ERR,
"rx checksum not available on this host");
return -ENOTSUP;
}
- if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
virtio_dev_cq_start(dev);
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
hw->vlan_strip = 1;
- hw->rx_ol_scatter = (rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+ hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
- if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
hw->use_vec_rx = 0;
}
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized rx for TCP_LRO enabled");
hw->use_vec_rx = 0;
hw->use_vec_rx = 0;
}
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_VLAN_STRIP)) {
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
PMD_DRV_LOG(INFO,
"disabled split ring vectorized rx for offloading enabled");
hw->use_vec_rx = 0;
{
struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
- struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+ struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
dev->data->dev_started = 0;
memset(&link, 0, sizeof(link));
link.link_duplex = hw->duplex;
link.link_speed = hw->speed;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
if (!hw->started) {
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
PMD_INIT_LOG(DEBUG, "Port %d is down",
dev->data->port_id);
} else {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
PMD_INIT_LOG(DEBUG, "Port %d is up",
dev->data->port_id);
}
} else {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
}
return rte_eth_linkstatus_set(dev, &link);
struct virtio_hw *hw = dev->data->dev_private;
uint64_t offloads = rxmode->offloads;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(NOTICE,
}
}
- if (mask & ETH_VLAN_STRIP_MASK)
- hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
+ hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
dev_info->max_mtu = hw->max_mtu;
host_features = VIRTIO_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM;
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
}
if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
if ((host_features & tso_mask) == tso_mask)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
}
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((host_features & tso_mask) == tso_mask)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
/*
#define VMXNET3_TX_MAX_SEG UINT8_MAX
#define VMXNET3_TX_OFFLOAD_CAP \
- (DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define VMXNET3_RX_OFFLOAD_CAP \
- (DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
int vmxnet3_segs_dynfield_offset = -1;
/* set the initial link status */
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
rte_eth_linkstatus_set(eth_dev, &link);
return 0;
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
hw->queueDescPA = mz->iova;
hw->queue_desc_len = (uint16_t)size;
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
/* Allocate memory structure for UPT1_RSSConf and configure */
mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
"rss_conf", rte_socket_id(),
devRead->rxFilterConf.rxMode = 0;
/* Setting up feature flags */
- if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
devRead->misc.uptFeatures |= VMXNET3_F_LRO;
devRead->misc.maxNumRxSG = 0;
}
- if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
ret = vmxnet3_rss_configure(dev);
if (ret != VMXNET3_SUCCESS)
return ret;
}
ret = vmxnet3_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
}
if (VMXNET3_VERSION_GE_4(hw) &&
- dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
/* Check for additional RSS */
ret = vmxnet3_v4_rss_configure(dev);
if (ret != VMXNET3_SUCCESS) {
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
rte_eth_linkstatus_set(dev, &link);
hw->adapter_stopped = 1;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
dev_info->min_mtu = VMXNET3_MIN_MTU;
dev_info->max_mtu = VMXNET3_MAX_MTU;
- dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
if (ret & 0x1)
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
return rte_eth_linkstatus_set(dev, &link);
}
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
uint32_t *vf_table = devRead->rxFilterConf.vfTable;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
else
devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_CMD_UPDATE_FEATURE);
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
VMXNET3_MAX_RX_QUEUES + 1)
#define VMXNET3_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP)
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)
#define VMXNET3_V4_RSS_MASK ( \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define VMXNET3_MANDATORY_V4_RSS ( \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV6_TCP)
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP)
/* RSS configuration structure - shared with device through GPA */
typedef struct VMXNET3_RSSConf {
rss_hf = port_rss_conf->rss_hf &
(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
/* loading hashType */
dev_rss_conf->hashType = 0;
rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
return VMXNET3_SUCCESS;
static const struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
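For reference, the same renamed mq_mode constants drive a minimal application-side port configuration; this is a hedged sketch rather than code from the patch, and the single Rx/Tx queue counts are placeholders.

#include <rte_ethdev.h>

/* Minimal sketch: configure one Rx and one Tx queue with no multi-queue mode. */
static int
configure_port_minimal(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}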
if (link_get_err >= 0 && link.link_status) {
const char *dp = (link.link_duplex ==
- ETH_LINK_FULL_DUPLEX) ?
+ RTE_ETH_LINK_FULL_DUPLEX) ?
"full-duplex" : "half-duplex";
printf("\nPort %u Link Up - speed %s - %s\n",
port_id,
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
"Error during getting device (port %u) info: %s\n",
portid, strerror(-retval));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
"Error during getting device (port %u) info: %s\n",
BOND_PORT, strerror(-retval));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
if (retval != 0)
rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.rx_adv_conf = {
.rss_conf = {
- .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
- ETH_RSS_TCP | ETH_RSS_SCTP,
+ .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
}
},
};
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
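The capability check above is the recurring idiom in these examples; as a hedged generalization (not taken from the patch), requested Tx offload flags can be intersected with what the port reports before they are applied.

#include <rte_ethdev.h>

/* Minimal sketch: keep only the requested Tx offload flags the port supports. */
static uint64_t
supported_tx_offloads(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	return wanted & dev_info.tx_offload_capa;
}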
int ret;
memset(&cfg_port, 0, sizeof(cfg_port));
- cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
+ cfg_port.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
struct app_port *ptr_port = &app_cfg->ports[idx_port];
pause_param->tx_pause = 0;
pause_param->rx_pause = 0;
switch (fc_conf.mode) {
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
pause_param->rx_pause = 1;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
pause_param->tx_pause = 1;
break;
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
pause_param->rx_pause = 1;
pause_param->tx_pause = 1;
default:
if (pause_param->tx_pause) {
if (pause_param->rx_pause)
- fc_conf.mode = RTE_FC_FULL;
+ fc_conf.mode = RTE_ETH_FC_FULL;
else
- fc_conf.mode = RTE_FC_TX_PAUSE;
+ fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
} else {
if (pause_param->rx_pause)
- fc_conf.mode = RTE_FC_RX_PAUSE;
+ fc_conf.mode = RTE_ETH_FC_RX_PAUSE;
else
- fc_conf.mode = RTE_FC_NONE;
+ fc_conf.mode = RTE_ETH_FC_NONE;
}
status = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
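To show the renamed RTE_ETH_FC_* enumerators from the caller's side, here is a hedged sketch (not part of the patch) that forces full flow control on a hypothetical port.

#include <rte_ethdev.h>

/* Minimal sketch: read current flow control settings, then request full mode. */
static int
force_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}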
for (vf = 0; vf < num_vfs; vf++) {
#ifdef RTE_NET_IXGBE
rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
- ETH_VMDQ_ACCEPT_UNTAG, 0);
+ RTE_ETH_VMDQ_ACCEPT_UNTAG, 0);
#endif
}
/* Enable Rx VLAN filter; unsupported status on VFs is discarded */
- ret = rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
+ ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
if (ret != 0)
return ret;
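The per-offload flags returned by rte_eth_dev_get_vlan_offload() were renamed the same way (RTE_ETH_VLAN_*_OFFLOAD); the following is a small hedged sketch, assuming a valid port id.

#include <stdbool.h>
#include <rte_ethdev.h>

/* Minimal sketch: report whether VLAN stripping is currently enabled. */
static bool
vlan_strip_enabled(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	return mask >= 0 && (mask & RTE_ETH_VLAN_STRIP_OFFLOAD) != 0;
}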
struct rte_eth_rxconf rx_conf;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
- .rss_hf = ETH_RSS_IP |
- ETH_RSS_TCP |
- ETH_RSS_UDP,
+ .rss_hf = RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_UDP,
}
}
};
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
struct rte_eth_rxconf rx_conf;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
- .rss_hf = ETH_RSS_IP |
- ETH_RSS_TCP |
- ETH_RSS_UDP,
+ .rss_hf = RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_UDP,
}
}
};
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
memset(&link, 0, sizeof(link));
do {
link_get_err = rte_eth_link_get(port_id, &link);
- if (link_get_err == 0 && link.link_status == ETH_LINK_UP)
+ if (link_get_err == 0 && link.link_status == RTE_ETH_LINK_UP)
break;
rte_delay_ms(CHECK_INTERVAL);
} while (--rep_cnt);
if (link_get_err < 0)
rte_exit(EXIT_FAILURE, ":: error: link get is failing: %s\n",
rte_strerror(-link_get_err));
- if (link.link_status == ETH_LINK_DOWN)
+ if (link.link_status == RTE_ETH_LINK_DOWN)
rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
}
},
.txmode = {
.offloads =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO,
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO,
},
};
struct rte_eth_txconf txq_conf;
/* Configuring port to use RSS for multiple RX queues. 8< */
static const struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_PROTO_MASK,
+ .rss_hf = RTE_ETH_RSS_PROTO_MASK,
}
}
};
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device:"
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_SCATTER),
+ .offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER),
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS),
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
+ .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
static struct rte_eth_conf port_conf_default = {
.link_speeds = 0,
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
.split_hdr_size = 0, /* Header split buffer size */
},
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
-#define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
static int
rss_setup(uint16_t port_id,
memset(reta_conf, 0, sizeof(reta_conf));
for (i = 0; i < reta_size; i++)
- reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+ reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
for (i = 0; i < reta_size; i++) {
- uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
- uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
uint32_t rss_qs_pos = i % rss->n_queues;
reta_conf[reta_id].reta[reta_pos] =
rss = params->rx.rss;
if (rss) {
if ((port_info.reta_size == 0) ||
- (port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+ (port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
return NULL;
if ((rss->n_queues == 0) ||
/* Port */
memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
if (rss) {
- port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
port_conf.rx_adv_conf.rss_conf.rss_hf =
- (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+ (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
port_info.flow_type_rss_offloads;
}
if (rte_eth_link_get(link->port_id, &link_params) < 0)
return 0;
- return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+ return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
}
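The RETA sizing macros used above were renamed as well; this hedged sketch (not from the patch) spreads queues round-robin over the redirection table with the RTE_ETH_ names, assuming the port's reta_size does not exceed 512 entries.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Minimal sketch: program a round-robin RSS redirection table. */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t n_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > RTE_ETH_RSS_RETA_SIZE_512 || n_queues == 0)
		return -EINVAL;
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE]
			.reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % n_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}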
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS),
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
+ .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
n_tx_queue = nb_lcores;
if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
n_tx_queue = MAX_TX_QUEUE_PER_PORT;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
- ETH_RSS_TCP | ETH_RSS_SCTP,
+ .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
" \"parallel\" : Parallel\n"
" --" CMD_LINE_OPT_RX_OFFLOAD
": bitmask of the RX HW offload capabilities to enable/use\n"
- " (DEV_RX_OFFLOAD_*)\n"
+ " (RTE_ETH_RX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_TX_OFFLOAD
": bitmask of the TX HW offload capabilities to enable/use\n"
- " (DEV_TX_OFFLOAD_*)\n"
+ " (RTE_ETH_TX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_REASSEMBLE " NUM"
": max number of entries in reassemble(fragment) table\n"
" (zero (default value) disables reassembly)\n"
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
local_port_conf.rxmode.offloads |= req_rx_offloads;
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
printf("port %u configurng rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
/* Pre-populate pkt offloads based on capabilities */
qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
- if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
tx_queueid++;
struct rte_flow *flow;
int ret;
- if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
return;
/* Add the default rte_flow to enable SECURITY for all ESP packets */
if (inbound) {
if ((dev_info.rx_offload_capa &
- DEV_RX_OFFLOAD_SECURITY) == 0) {
+ RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, PORT,
"hardware RX IPSec offload is not supported\n");
return -EINVAL;
} else { /* outbound */
if ((dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_SECURITY) == 0) {
+ RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, PORT,
"hardware TX IPSec offload is not supported\n");
return -EINVAL;
rule_type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
- *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+ *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
}
/* Check for outbound rules that use offloads and use this port */
rule_type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
- *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
}
return 0;
}
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
+ .offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
"Error during getting device (port %u) info: %s\n",
port, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (retval < 0) {
printf("Cannot configure device: err=%d, port=%u\n",
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
uint16_t nb_ports_available = 0;
int ret;
if (rsrc->event_mode) {
- port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
- port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+ port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
}
/* Initialise each port */
local_port_conf.rx_adv_conf.rss_conf.rss_hf);
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure RX and TX queue. 8< */
ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
if (ret < 0)
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure the RX and TX queues. 8< */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure the number of queues for a port. */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
- ETH_RSS_TCP | ETH_RSS_SCTP,
+ .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU)
- conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
"Invalid max packet length: %u (port %u)\n",
max_pkt_len, portid);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* Clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU)
- conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
"Invalid max packet length: %u (port %u)\n",
max_pkt_len, portid);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_UDP,
+ .rss_hf = RTE_ETH_RSS_UDP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
}
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU)
- conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
"Invalid max packet length: %u (port %u)\n",
max_pkt_len, portid);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
rte_panic("Error during getting device (port %u) info:"
"%s\n", port_id, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU)
- conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
"Invalid max packet length: %u (port %u)\n",
max_pkt_len, portid);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
if (dev_info.max_rx_queues == 1)
- local_port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+ local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
port_conf.rx_adv_conf.rss_conf.rss_hf) {
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.intr_conf = {
.lsc = 1, /**< lsc interrupt feature enabled */
link_get_err < 0 ? "0" :
rte_eth_link_speed_to_str(link.link_speed),
link_get_err < 0 ? "Link get failed" :
- (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+ (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex"),
port_statistics[portid].tx,
port_statistics[portid].rx,
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure RX and TX queues. 8< */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
/* for port configuration all features are off by default */
const struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS
+ .mq_mode = RTE_ETH_MQ_RX_RSS
}
};
const uint16_t rx_rings = 1, tx_rings = num_clients;
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
{
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
}
};
const uint16_t rx_rings = num_queues, tx_rings = num_queues;
info.default_rxconf.rx_drop_en = 1;
- if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
static struct rte_eth_conf eth_port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
return ret;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
if (ret != 0)
return ret;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_TCP,
+ .rss_hf = RTE_ETH_RSS_TCP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU)
- conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
return 0;
}
"Invalid max packet length: %u (port %u)\n",
max_pkt_len, portid);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
static struct rte_eth_conf port_conf_default = {
.link_speeds = 0,
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
.split_hdr_size = 0, /* Header split buffer size */
},
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
-#define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+#define RETA_CONF_SIZE (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)
static int
rss_setup(uint16_t port_id,
memset(reta_conf, 0, sizeof(reta_conf));
for (i = 0; i < reta_size; i++)
- reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+ reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
for (i = 0; i < reta_size; i++) {
- uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
- uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
uint32_t rss_qs_pos = i % rss->n_queues;
reta_conf[reta_id].reta[reta_pos] =
rss = params->rx.rss;
if (rss) {
if ((port_info.reta_size == 0) ||
- (port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+ (port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
return NULL;
if ((rss->n_queues == 0) ||
/* Port */
memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
if (rss) {
- port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
port_conf.rx_adv_conf.rss_conf.rss_hf =
- (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+ (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
port_info.flow_type_rss_offloads;
}
if (rte_eth_link_get(link->port_id, &link_params) < 0)
return 0;
- return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+ return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
}
struct link *
return retval;
}
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Force full Tx path in the driver, required for IEEE1588 */
- port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
***/
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
- .mq_mode = ETH_DCB_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
"Error during getting device (port %u) info: %s\n",
port_rx, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+ conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
if (conf.rx_adv_conf.rss_conf.rss_hf !=
"Error during getting device (port %u) info: %s\n",
port_tx, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+ conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
if (conf.rx_adv_conf.rss_conf.rss_hf !=
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_DCB_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
if (hw_timestamping) {
- if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
+ if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
printf("\nERROR: Port %u does not support hardware timestamping\n"
, port);
return -1;
}
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
if (hwts_dynfield_offset < 0) {
printf("ERROR: Failed to register timestamp field\n");
/* for port configuration all features are off by default */
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
};
const uint16_t rx_rings = 1, tx_rings = num_nodes;
if (retval != 0)
return retval;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/*
* Standard DPDK port initialisation - config port, then set up
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
+ .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
/*
 * VLAN strip is necessary for 1G NICs such as I350;
 * it fixes a bug where IPv4 forwarding in the guest cannot
 * forward packets from one virtio device to another.
*/
- .offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
+ .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_TCP_TSO),
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
+ .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO),
},
.rx_adv_conf = {
/*
* appropriate values
*/
.vmdq_rx_conf = {
- .nb_queue_pools = ETH_8_POOLS,
+ .nb_queue_pools = RTE_ETH_8_POOLS,
.enable_default_pool = 0,
.default_pool = 0,
.nb_pool_maps = 0,
return -1;
rx_rings = (uint16_t)dev_info.max_rx_queues;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0) {
case 'P':
promiscuous = 1;
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
- ETH_VMDQ_ACCEPT_BROADCAST |
- ETH_VMDQ_ACCEPT_MULTICAST;
+ RTE_ETH_VMDQ_ACCEPT_BROADCAST |
+ RTE_ETH_VMDQ_ACCEPT_MULTICAST;
break;
case OPT_VM2VM_NUM:
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
+ .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.rx_adv_conf = {
/*
* appropriate values
*/
.vmdq_rx_conf = {
- .nb_queue_pools = ETH_8_POOLS,
+ .nb_queue_pools = RTE_ETH_8_POOLS,
.enable_default_pool = 0,
.default_pool = 0,
.nb_pool_maps = 0,
(void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
if (rss_enable) {
- eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
- eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
- ETH_RSS_UDP |
- ETH_RSS_TCP |
- ETH_RSS_SCTP;
+ eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+ eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_SCTP;
}
return 0;
}
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
if (retval != 0)
return retval;
static unsigned num_ports;
/* number of pools (if user does not specify any, 32 by default) */
-static enum rte_eth_nb_pools num_pools = ETH_32_POOLS;
-static enum rte_eth_nb_tcs num_tcs = ETH_4_TCS;
+static enum rte_eth_nb_pools num_pools = RTE_ETH_32_POOLS;
+static enum rte_eth_nb_tcs num_tcs = RTE_ETH_4_TCS;
static uint16_t num_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint8_t rss_enable;
/* Empty vmdq+dcb configuration structure. Filled in programmatically. 8< */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_DCB,
+ .mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_VMDQ_DCB,
+ .mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB,
},
/*
* should be overridden separately in code with
*/
.rx_adv_conf = {
.vmdq_dcb_conf = {
- .nb_queue_pools = ETH_32_POOLS,
+ .nb_queue_pools = RTE_ETH_32_POOLS,
.enable_default_pool = 0,
.default_pool = 0,
.nb_pool_maps = 0,
.dcb_tc = {0},
},
.dcb_rx_conf = {
- .nb_tcs = ETH_4_TCS,
+ .nb_tcs = RTE_ETH_4_TCS,
/** Traffic class each UP mapped to. */
.dcb_tc = {0},
},
.vmdq_rx_conf = {
- .nb_queue_pools = ETH_32_POOLS,
+ .nb_queue_pools = RTE_ETH_32_POOLS,
.enable_default_pool = 0,
.default_pool = 0,
.nb_pool_maps = 0,
},
.tx_adv_conf = {
.vmdq_dcb_tx_conf = {
- .nb_queue_pools = ETH_32_POOLS,
+ .nb_queue_pools = RTE_ETH_32_POOLS,
.dcb_tc = {0},
},
},
conf.pool_map[i].pools = 1UL << i;
vmdq_conf.pool_map[i].pools = 1UL << i;
}
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
conf.dcb_tc[i] = i % num_tcs;
dcb_conf.dcb_tc[i] = i % num_tcs;
tx_conf.dcb_tc[i] = i % num_tcs;
(void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &tx_conf,
sizeof(tx_conf)));
if (rss_enable) {
- eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
- eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
- ETH_RSS_UDP |
- ETH_RSS_TCP |
- ETH_RSS_SCTP;
+ eth_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
+ eth_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP |
+ RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP |
+ RTE_ETH_RSS_SCTP;
}
return 0;
}
return retval;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
port_conf.rx_adv_conf.rss_conf.rss_hf &=
if (n != 16 && n != 32)
return -1;
if (n == 16)
- num_pools = ETH_16_POOLS;
+ num_pools = RTE_ETH_16_POOLS;
else
- num_pools = ETH_32_POOLS;
+ num_pools = RTE_ETH_32_POOLS;
return 0;
}
if (n != 4 && n != 8)
return -1;
if (n == 4)
- num_tcs = ETH_4_TCS;
+ num_tcs = RTE_ETH_4_TCS;
else
- num_tcs = ETH_8_TCS;
+ num_tcs = RTE_ETH_8_TCS;
return 0;
}
/** Device Ethernet link address. @see rte_eth_dev_release_port() */
struct rte_ether_addr *mac_addrs;
/** Bitmap associating MAC addresses to pools */
- uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+ uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
/**
* Device Ethernet MAC addresses of hash filtering.
* @see rte_eth_dev_release_port()
/**
* filter type of tunneling packet
*/
-#define ETH_TUNNEL_FILTER_OMAC 0x01 /**< filter by outer MAC addr */
-#define ETH_TUNNEL_FILTER_OIP 0x02 /**< filter by outer IP Addr */
-#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
-#define ETH_TUNNEL_FILTER_IMAC 0x08 /**< filter by inner MAC addr */
-#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
-#define ETH_TUNNEL_FILTER_IIP 0x20 /**< filter by inner IP addr */
-
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
- ETH_TUNNEL_FILTER_IVLAN)
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
- ETH_TUNNEL_FILTER_IVLAN | \
- ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
- ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
- ETH_TUNNEL_FILTER_TENID | \
- ETH_TUNNEL_FILTER_IMAC)
+#define RTE_ETH_TUNNEL_FILTER_OMAC 0x01 /**< filter by outer MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_OIP 0x02 /**< filter by outer IP Addr */
+#define RTE_ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define RTE_ETH_TUNNEL_FILTER_IMAC 0x08 /**< filter by inner MAC addr */
+#define RTE_ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define RTE_ETH_TUNNEL_FILTER_IIP 0x20 /**< filter by inner IP addr */
+
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN (RTE_ETH_TUNNEL_FILTER_IMAC | \
+ RTE_ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+ RTE_ETH_TUNNEL_FILTER_IVLAN | \
+ RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_IMAC_TENID (RTE_ETH_TUNNEL_FILTER_IMAC | \
+ RTE_ETH_TUNNEL_FILTER_TENID)
+#define RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC (RTE_ETH_TUNNEL_FILTER_OMAC | \
+ RTE_ETH_TUNNEL_FILTER_TENID | \
+ RTE_ETH_TUNNEL_FILTER_IMAC)
/**
* Select IPv4 or IPv6 for tunnel filters.
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
#define RTE_RX_OFFLOAD_BIT2STR(_name) \
- { DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name) \
{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
static const struct {
RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
- RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+ RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};
#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR
#define RTE_TX_OFFLOAD_BIT2STR(_name) \
- { DEV_TX_OFFLOAD_##_name, #_name }
+ { RTE_ETH_TX_OFFLOAD_##_name, #_name }
static const struct {
uint64_t offload;
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
switch (speed) {
- case ETH_SPEED_NUM_10M:
- return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
- case ETH_SPEED_NUM_100M:
- return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
- case ETH_SPEED_NUM_1G:
- return ETH_LINK_SPEED_1G;
- case ETH_SPEED_NUM_2_5G:
- return ETH_LINK_SPEED_2_5G;
- case ETH_SPEED_NUM_5G:
- return ETH_LINK_SPEED_5G;
- case ETH_SPEED_NUM_10G:
- return ETH_LINK_SPEED_10G;
- case ETH_SPEED_NUM_20G:
- return ETH_LINK_SPEED_20G;
- case ETH_SPEED_NUM_25G:
- return ETH_LINK_SPEED_25G;
- case ETH_SPEED_NUM_40G:
- return ETH_LINK_SPEED_40G;
- case ETH_SPEED_NUM_50G:
- return ETH_LINK_SPEED_50G;
- case ETH_SPEED_NUM_56G:
- return ETH_LINK_SPEED_56G;
- case ETH_SPEED_NUM_100G:
- return ETH_LINK_SPEED_100G;
- case ETH_SPEED_NUM_200G:
- return ETH_LINK_SPEED_200G;
+ case RTE_ETH_SPEED_NUM_10M:
+ return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+ case RTE_ETH_SPEED_NUM_100M:
+ return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+ case RTE_ETH_SPEED_NUM_1G:
+ return RTE_ETH_LINK_SPEED_1G;
+ case RTE_ETH_SPEED_NUM_2_5G:
+ return RTE_ETH_LINK_SPEED_2_5G;
+ case RTE_ETH_SPEED_NUM_5G:
+ return RTE_ETH_LINK_SPEED_5G;
+ case RTE_ETH_SPEED_NUM_10G:
+ return RTE_ETH_LINK_SPEED_10G;
+ case RTE_ETH_SPEED_NUM_20G:
+ return RTE_ETH_LINK_SPEED_20G;
+ case RTE_ETH_SPEED_NUM_25G:
+ return RTE_ETH_LINK_SPEED_25G;
+ case RTE_ETH_SPEED_NUM_40G:
+ return RTE_ETH_LINK_SPEED_40G;
+ case RTE_ETH_SPEED_NUM_50G:
+ return RTE_ETH_LINK_SPEED_50G;
+ case RTE_ETH_SPEED_NUM_56G:
+ return RTE_ETH_LINK_SPEED_56G;
+ case RTE_ETH_SPEED_NUM_100G:
+ return RTE_ETH_LINK_SPEED_100G;
+ case RTE_ETH_SPEED_NUM_200G:
+ return RTE_ETH_LINK_SPEED_200G;
default:
return 0;
}
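A minimal usage sketch (editor's addition, not part of the patch): an application that wants a fixed link rate can build the link_speeds bitmap from a numeric speed with this helper. The port_conf variable is hypothetical and error handling is omitted.

	struct rte_eth_conf port_conf = { 0 };

	/* Request a fixed 10 Gbps full-duplex link instead of autonegotiation. */
	port_conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
				      RTE_ETH_LINK_FULL_DUPLEX);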
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
uint32_t max_rx_pktlen;
uint32_t overhead_len;
}
/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
- if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
- (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+ (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
port_id,
- rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+ rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
ret = -EINVAL;
goto rollback;
}
* size is supported by the configured device.
*/
/* Get the real Ethernet overhead length */
- if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
uint32_t overhead_len;
uint32_t max_rx_pktlen;
int ret;
rte_eth_link_speed_to_str(uint32_t link_speed)
{
switch (link_speed) {
- case ETH_SPEED_NUM_NONE: return "None";
- case ETH_SPEED_NUM_10M: return "10 Mbps";
- case ETH_SPEED_NUM_100M: return "100 Mbps";
- case ETH_SPEED_NUM_1G: return "1 Gbps";
- case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
- case ETH_SPEED_NUM_5G: return "5 Gbps";
- case ETH_SPEED_NUM_10G: return "10 Gbps";
- case ETH_SPEED_NUM_20G: return "20 Gbps";
- case ETH_SPEED_NUM_25G: return "25 Gbps";
- case ETH_SPEED_NUM_40G: return "40 Gbps";
- case ETH_SPEED_NUM_50G: return "50 Gbps";
- case ETH_SPEED_NUM_56G: return "56 Gbps";
- case ETH_SPEED_NUM_100G: return "100 Gbps";
- case ETH_SPEED_NUM_200G: return "200 Gbps";
- case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+ case RTE_ETH_SPEED_NUM_NONE: return "None";
+ case RTE_ETH_SPEED_NUM_10M: return "10 Mbps";
+ case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+ case RTE_ETH_SPEED_NUM_1G: return "1 Gbps";
+ case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+ case RTE_ETH_SPEED_NUM_5G: return "5 Gbps";
+ case RTE_ETH_SPEED_NUM_10G: return "10 Gbps";
+ case RTE_ETH_SPEED_NUM_20G: return "20 Gbps";
+ case RTE_ETH_SPEED_NUM_25G: return "25 Gbps";
+ case RTE_ETH_SPEED_NUM_40G: return "40 Gbps";
+ case RTE_ETH_SPEED_NUM_50G: return "50 Gbps";
+ case RTE_ETH_SPEED_NUM_56G: return "56 Gbps";
+ case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+ case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+ case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
default: return "Invalid";
}
}
return -EINVAL;
}
- if (eth_link->link_status == ETH_LINK_DOWN)
+ if (eth_link->link_status == RTE_ETH_LINK_DOWN)
return snprintf(str, len, "Link down");
else
return snprintf(str, len, "Link up at %s %s %s",
rte_eth_link_speed_to_str(eth_link->link_speed),
- (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
"FDX" : "HDX",
- (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+ (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
"Autoneg" : "Fixed");
}
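An illustrative sketch (editor's addition, not part of the patch) of how an application formats link status with these helpers; port_id is assumed to be a valid, started port and <stdio.h> is assumed to be included.

	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	/* Query the link without waiting and render it as a human-readable string. */
	rte_eth_link_get_nowait(port_id, &link);
	rte_eth_link_to_str(text, sizeof(text), &link);
	printf("Port %u: %s\n", port_id, text);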
dev = &rte_eth_devices[port_id];
if (!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)) {
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
port_id);
return -ENOSYS;
dev_offloads = orig_offloads;
/* check which option changed by application */
- cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
- mask |= ETH_VLAN_STRIP_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ mask |= RTE_ETH_VLAN_STRIP_MASK;
}
- cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
- mask |= ETH_VLAN_FILTER_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+ mask |= RTE_ETH_VLAN_FILTER_MASK;
}
- cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+ cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
- mask |= ETH_VLAN_EXTEND_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+ mask |= RTE_ETH_VLAN_EXTEND_MASK;
}
- cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+ cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
- mask |= ETH_QINQ_STRIP_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+ mask |= RTE_ETH_QINQ_STRIP_MASK;
}
/*no change*/
dev = &rte_eth_devices[port_id];
dev_offloads = &dev->data->dev_conf.rxmode.offloads;
- if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- ret |= ETH_VLAN_STRIP_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
- if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- ret |= ETH_VLAN_FILTER_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
- if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
- ret |= ETH_VLAN_EXTEND_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+ ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
- if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
- ret |= ETH_QINQ_STRIP_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+ ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
return ret;
}
return -EINVAL;
}
- if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+ if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
return -EINVAL;
}
{
uint16_t i, num;
- num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
+ num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
if (reta_conf[i].mask)
return 0;
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
(reta_conf[idx].reta[shift] >= max_rxq)) {
RTE_ETHDEV_LOG(ERR,
return -EINVAL;
}
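For context, an illustrative sketch (not part of the patch) of how an application typically fills the 64-entry groups validated above; a 128-entry table spread over two Rx queues is assumed, port_id is hypothetical, and error handling is omitted.

	struct rte_eth_rss_reta_entry64 reta_conf[2];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < 128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= RTE_BIT64(shift);
		reta_conf[idx].reta[shift] = i % 2; /* alternate queues 0 and 1 */
	}
	rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);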
- if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
return -EINVAL;
}
- if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
port_id);
return -EINVAL;
}
- if (pool >= ETH_64_POOLS) {
- RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", ETH_64_POOLS - 1);
+ if (pool >= RTE_ETH_64_POOLS) {
+ RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
return -EINVAL;
}
rte_tel_data_add_dict_string(d, status_str, "UP");
rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
rte_tel_data_add_dict_string(d, "duplex",
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
"full-duplex" : "half-duplex");
return 0;
}
* field is not supported, its value is 0.
* All byte-related statistics do not include Ethernet FCS regardless
* of whether these bytes have been delivered to the application
- * (see DEV_RX_OFFLOAD_KEEP_CRC).
+ * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
*/
struct rte_eth_stats {
uint64_t ipackets; /**< Total number of successfully received packets. */
/**@{@name Link speed capabilities
* Device supported speeds bitmap flags
*/
-#define ETH_LINK_SPEED_AUTONEG 0 /**< Autonegotiate (all speeds) */
-#define ETH_LINK_SPEED_FIXED RTE_BIT32(0) /**< Disable autoneg (fixed speed) */
-#define ETH_LINK_SPEED_10M_HD RTE_BIT32(1) /**< 10 Mbps half-duplex */
-#define ETH_LINK_SPEED_10M RTE_BIT32(2) /**< 10 Mbps full-duplex */
-#define ETH_LINK_SPEED_100M_HD RTE_BIT32(3) /**< 100 Mbps half-duplex */
-#define ETH_LINK_SPEED_100M RTE_BIT32(4) /**< 100 Mbps full-duplex */
-#define ETH_LINK_SPEED_1G RTE_BIT32(5) /**< 1 Gbps */
-#define ETH_LINK_SPEED_2_5G RTE_BIT32(6) /**< 2.5 Gbps */
-#define ETH_LINK_SPEED_5G RTE_BIT32(7) /**< 5 Gbps */
-#define ETH_LINK_SPEED_10G RTE_BIT32(8) /**< 10 Gbps */
-#define ETH_LINK_SPEED_20G RTE_BIT32(9) /**< 20 Gbps */
-#define ETH_LINK_SPEED_25G RTE_BIT32(10) /**< 25 Gbps */
-#define ETH_LINK_SPEED_40G RTE_BIT32(11) /**< 40 Gbps */
-#define ETH_LINK_SPEED_50G RTE_BIT32(12) /**< 50 Gbps */
-#define ETH_LINK_SPEED_56G RTE_BIT32(13) /**< 56 Gbps */
-#define ETH_LINK_SPEED_100G RTE_BIT32(14) /**< 100 Gbps */
-#define ETH_LINK_SPEED_200G RTE_BIT32(15) /**< 200 Gbps */
+#define RTE_ETH_LINK_SPEED_AUTONEG 0 /**< Autonegotiate (all speeds) */
+#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0) /**< Disable autoneg (fixed speed) */
+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1) /**< 10 Mbps half-duplex */
+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2) /**< 10 Mbps full-duplex */
+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3) /**< 100 Mbps half-duplex */
+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4) /**< 100 Mbps full-duplex */
+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5) /**< 1 Gbps */
+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6) /**< 2.5 Gbps */
+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7) /**< 5 Gbps */
+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8) /**< 10 Gbps */
+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9) /**< 20 Gbps */
+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10) /**< 25 Gbps */
+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11) /**< 40 Gbps */
+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12) /**< 50 Gbps */
+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13) /**< 56 Gbps */
+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14) /**< 100 Gbps */
+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
+#define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15) /**< 200 Gbps */
+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
/**@}*/
/**@{@name Link speed
* Ethernet numeric link speeds in Mbps
*/
-#define ETH_SPEED_NUM_NONE 0 /**< Not defined */
-#define ETH_SPEED_NUM_10M 10 /**< 10 Mbps */
-#define ETH_SPEED_NUM_100M 100 /**< 100 Mbps */
-#define ETH_SPEED_NUM_1G 1000 /**< 1 Gbps */
-#define ETH_SPEED_NUM_2_5G 2500 /**< 2.5 Gbps */
-#define ETH_SPEED_NUM_5G 5000 /**< 5 Gbps */
-#define ETH_SPEED_NUM_10G 10000 /**< 10 Gbps */
-#define ETH_SPEED_NUM_20G 20000 /**< 20 Gbps */
-#define ETH_SPEED_NUM_25G 25000 /**< 25 Gbps */
-#define ETH_SPEED_NUM_40G 40000 /**< 40 Gbps */
-#define ETH_SPEED_NUM_50G 50000 /**< 50 Gbps */
-#define ETH_SPEED_NUM_56G 56000 /**< 56 Gbps */
-#define ETH_SPEED_NUM_100G 100000 /**< 100 Gbps */
-#define ETH_SPEED_NUM_200G 200000 /**< 200 Gbps */
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define RTE_ETH_SPEED_NUM_NONE 0 /**< Not defined */
+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M 10 /**< 10 Mbps */
+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M 100 /**< 100 Mbps */
+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G 1000 /**< 1 Gbps */
+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G 2500 /**< 2.5 Gbps */
+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G 5000 /**< 5 Gbps */
+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G 10000 /**< 10 Gbps */
+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G 20000 /**< 20 Gbps */
+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G 25000 /**< 25 Gbps */
+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G 40000 /**< 40 Gbps */
+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G 50000 /**< 50 Gbps */
+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G 56000 /**< 56 Gbps */
+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G 100000 /**< 100 Gbps */
+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define RTE_ETH_SPEED_NUM_200G 200000 /**< 200 Gbps */
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
+#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
/**@}*/
/**
*/
__extension__
struct rte_eth_link {
- uint32_t link_speed; /**< ETH_SPEED_NUM_ */
- uint16_t link_duplex : 1; /**< ETH_LINK_[HALF/FULL]_DUPLEX */
- uint16_t link_autoneg : 1; /**< ETH_LINK_[AUTONEG/FIXED] */
- uint16_t link_status : 1; /**< ETH_LINK_[DOWN/UP] */
+ uint32_t link_speed; /**< RTE_ETH_SPEED_NUM_ */
+ uint16_t link_duplex : 1; /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
+ uint16_t link_autoneg : 1; /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
+ uint16_t link_status : 1; /**< RTE_ETH_LINK_[DOWN/UP] */
} __rte_aligned(8); /**< aligned for atomic64 read/write */
/**@{@name Link negotiation
* Constants used in link management.
*/
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
-#define ETH_LINK_DOWN 0 /**< Link is down (see link_status). */
-#define ETH_LINK_UP 1 /**< Link is up (see link_status). */
-#define ETH_LINK_FIXED 0 /**< No autonegotiation (see link_autoneg). */
-#define ETH_LINK_AUTONEG 1 /**< Autonegotiated (see link_autoneg). */
+#define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN 0 /**< Link is down (see link_status). */
+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP 1 /**< Link is up (see link_status). */
+#define ETH_LINK_UP RTE_ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED 0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG 1 /**< Autonegotiated (see link_autoneg). */
+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
#define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
/**@}*/
/**@{@name Multi-queue mode
* @see rte_eth_conf.rxmode.mq_mode.
*/
-#define ETH_MQ_RX_RSS_FLAG 0x1 /**< Enable RSS. @see rte_eth_rss_conf */
-#define ETH_MQ_RX_DCB_FLAG 0x2 /**< Enable DCB. */
-#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define RTE_ETH_MQ_RX_RSS_FLAG 0x1 /**< Enable RSS. @see rte_eth_rss_conf */
+#define ETH_MQ_RX_RSS_FLAG RTE_ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG 0x2 /**< Enable DCB. */
+#define ETH_MQ_RX_DCB_FLAG RTE_ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG 0x4 /**< Enable VMDq. */
+#define ETH_MQ_RX_VMDQ_FLAG RTE_ETH_MQ_RX_VMDQ_FLAG
/**@}*/
/**
*/
enum rte_eth_rx_mq_mode {
/** None of DCB, RSS or VMDq mode */
- ETH_MQ_RX_NONE = 0,
+ RTE_ETH_MQ_RX_NONE = 0,
/** For Rx side, only RSS is on */
- ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+ RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
/** For Rx side, only DCB is on. */
- ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+ RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
/** Both DCB and RSS enable */
- ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+ RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
/** Only VMDq, no RSS nor DCB */
- ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+ RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
/** RSS mode with VMDq */
- ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+ RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
/** Use VMDq+DCB to route traffic to queues */
- ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+ RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
/** Enable both VMDq and DCB in VMDq */
- ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
- ETH_MQ_RX_VMDQ_FLAG,
+ RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
+ RTE_ETH_MQ_RX_VMDQ_FLAG,
};
-/**
- * for Rx mq mode backward compatible
- */
-#define ETH_RSS ETH_MQ_RX_RSS
-#define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
-#define ETH_DCB_RX ETH_MQ_RX_DCB
+#define ETH_MQ_RX_NONE RTE_ETH_MQ_RX_NONE
+#define ETH_MQ_RX_RSS RTE_ETH_MQ_RX_RSS
+#define ETH_MQ_RX_DCB RTE_ETH_MQ_RX_DCB
+#define ETH_MQ_RX_DCB_RSS RTE_ETH_MQ_RX_DCB_RSS
+#define ETH_MQ_RX_VMDQ_ONLY RTE_ETH_MQ_RX_VMDQ_ONLY
+#define ETH_MQ_RX_VMDQ_RSS RTE_ETH_MQ_RX_VMDQ_RSS
+#define ETH_MQ_RX_VMDQ_DCB RTE_ETH_MQ_RX_VMDQ_DCB
+#define ETH_MQ_RX_VMDQ_DCB_RSS RTE_ETH_MQ_RX_VMDQ_DCB_RSS
/**
* A set of values to identify what method is to be used to transmit
* packets using multi-TCs.
*/
enum rte_eth_tx_mq_mode {
- ETH_MQ_TX_NONE = 0, /**< It is in neither DCB nor VT mode. */
- ETH_MQ_TX_DCB, /**< For Tx side,only DCB is on. */
- ETH_MQ_TX_VMDQ_DCB, /**< For Tx side,both DCB and VT is on. */
- ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
+ RTE_ETH_MQ_TX_NONE = 0, /**< It is in neither DCB nor VT mode. */
+ RTE_ETH_MQ_TX_DCB, /**< For Tx side, only DCB is on. */
+ RTE_ETH_MQ_TX_VMDQ_DCB, /**< For Tx side, both DCB and VT are on. */
+ RTE_ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
};
-
-/**
- * for Tx mq mode backward compatible
- */
-#define ETH_DCB_NONE ETH_MQ_TX_NONE
-#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
-#define ETH_DCB_TX ETH_MQ_TX_DCB
+#define ETH_MQ_TX_NONE RTE_ETH_MQ_TX_NONE
+#define ETH_MQ_TX_DCB RTE_ETH_MQ_TX_DCB
+#define ETH_MQ_TX_VMDQ_DCB RTE_ETH_MQ_TX_VMDQ_DCB
+#define ETH_MQ_TX_VMDQ_ONLY RTE_ETH_MQ_TX_VMDQ_ONLY
/**
* A structure used to configure the Rx features of an Ethernet port.
uint32_t max_lro_pkt_size;
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
/**
- * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+ * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
* Only offloads set on rx_offload_capa field on rte_eth_dev_info
* structure are allowed to be set.
*/
* Note that single VLAN is treated the same as inner VLAN.
*/
enum rte_vlan_type {
- ETH_VLAN_TYPE_UNKNOWN = 0,
- ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
- ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
- ETH_VLAN_TYPE_MAX,
+ RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
+ RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+ RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
+ RTE_ETH_VLAN_TYPE_MAX,
};
+#define ETH_VLAN_TYPE_UNKNOWN RTE_ETH_VLAN_TYPE_UNKNOWN
+#define ETH_VLAN_TYPE_INNER RTE_ETH_VLAN_TYPE_INNER
+#define ETH_VLAN_TYPE_OUTER RTE_ETH_VLAN_TYPE_OUTER
+#define ETH_VLAN_TYPE_MAX RTE_ETH_VLAN_TYPE_MAX
+
/**
* A structure used to describe a VLAN filter.
* If the bit corresponding to a VID is set, such VID is on.
* Below macros are defined for RSS offload types, they can be used to
* fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
*/
-#define ETH_RSS_IPV4 RTE_BIT64(2)
-#define ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
-#define ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
-#define ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
-#define ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
-#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
-#define ETH_RSS_IPV6 RTE_BIT64(8)
-#define ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
-#define ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
-#define ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
-#define ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
-#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
-#define ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
-#define ETH_RSS_IPV6_EX RTE_BIT64(15)
-#define ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
-#define ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
-#define ETH_RSS_PORT RTE_BIT64(18)
-#define ETH_RSS_VXLAN RTE_BIT64(19)
-#define ETH_RSS_GENEVE RTE_BIT64(20)
-#define ETH_RSS_NVGRE RTE_BIT64(21)
-#define ETH_RSS_GTPU RTE_BIT64(23)
-#define ETH_RSS_ETH RTE_BIT64(24)
-#define ETH_RSS_S_VLAN RTE_BIT64(25)
-#define ETH_RSS_C_VLAN RTE_BIT64(26)
-#define ETH_RSS_ESP RTE_BIT64(27)
-#define ETH_RSS_AH RTE_BIT64(28)
-#define ETH_RSS_L2TPV3 RTE_BIT64(29)
-#define ETH_RSS_PFCP RTE_BIT64(30)
-#define ETH_RSS_PPPOE RTE_BIT64(31)
-#define ETH_RSS_ECPRI RTE_BIT64(32)
-#define ETH_RSS_MPLS RTE_BIT64(33)
-#define ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
+#define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
+#define ETH_RSS_NONFRAG_IPV4_TCP RTE_ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
+#define ETH_RSS_NONFRAG_IPV4_UDP RTE_ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
+#define ETH_RSS_NONFRAG_IPV4_SCTP RTE_ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
+#define ETH_RSS_NONFRAG_IPV6_TCP RTE_ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
+#define ETH_RSS_NONFRAG_IPV6_UDP RTE_ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
+#define ETH_RSS_NONFRAG_IPV6_SCTP RTE_ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
+#define ETH_RSS_L2_PAYLOAD RTE_ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
+#define ETH_RSS_IPV6_TCP_EX RTE_ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
+#define ETH_RSS_IPV6_UDP_EX RTE_ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_PORT RTE_BIT64(18)
+#define ETH_RSS_PORT RTE_ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
+#define ETH_RSS_VXLAN RTE_ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
+#define ETH_RSS_GENEVE RTE_ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
+#define ETH_RSS_NVGRE RTE_ETH_RSS_NVGRE
+#define RTE_ETH_RSS_GTPU RTE_BIT64(23)
+#define ETH_RSS_GTPU RTE_ETH_RSS_GTPU
+#define RTE_ETH_RSS_ETH RTE_BIT64(24)
+#define ETH_RSS_ETH RTE_ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
+#define ETH_RSS_S_VLAN RTE_ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP RTE_BIT64(27)
+#define ETH_RSS_ESP RTE_ETH_RSS_ESP
+#define RTE_ETH_RSS_AH RTE_BIT64(28)
+#define ETH_RSS_AH RTE_ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
+#define ETH_RSS_L2TPV3 RTE_ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP RTE_BIT64(30)
+#define ETH_RSS_PFCP RTE_ETH_RSS_PFCP
+#define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
+#define ETH_RSS_PPPOE RTE_ETH_RSS_PPPOE
+#define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
+#define ETH_RSS_ECPRI RTE_ETH_RSS_ECPRI
+#define RTE_ETH_RSS_MPLS RTE_BIT64(33)
+#define ETH_RSS_MPLS RTE_ETH_RSS_MPLS
+#define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
+#define ETH_RSS_IPV4_CHKSUM RTE_ETH_RSS_IPV4_CHKSUM
/**
* The ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
* checksum type for constructing the use of RSS offload bits.
*
* Due to above reason, some old APIs (and configuration) don't support
- * ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
+ * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
*
* For the case that checksum is not used in a UDP header,
* it takes the reserved value 0 as input for the hash function.
*/
-#define ETH_RSS_L4_CHKSUM RTE_BIT64(35)
+#define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
+#define ETH_RSS_L4_CHKSUM RTE_ETH_RSS_L4_CHKSUM
/*
- * We use the following macros to combine with above ETH_RSS_* for
+ * We use the following macros to combine with above RTE_ETH_RSS_* for
* more specific input set selection. These bits are defined starting
* from the high end of the 64 bits.
- * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
+ * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
* both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
* the same level are used simultaneously, it is the same case as none of
* them are added.
*/
-#define ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
-#define ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
-#define ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
-#define ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
-#define ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
-#define ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
+#define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
+#define ETH_RSS_L4_SRC_ONLY RTE_ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
+#define ETH_RSS_L4_DST_ONLY RTE_ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
+#define ETH_RSS_L2_SRC_ONLY RTE_ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
+#define ETH_RSS_L2_DST_ONLY RTE_ETH_RSS_L2_DST_ONLY
/*
* Only select IPV6 address prefix as RSS input set according to
- * https://tools.ietf.org/html/rfc6052
- * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
- * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
+ * https://tools.ietf.org/html/rfc6052
+ * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
*/
-#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
-#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
-#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
-#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
-#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
-#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
+#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
+#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
+#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
+#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
+#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
+#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
/*
* Use the following macros to combine with the above layers
* It basically stands for the innermost encapsulation level RSS
* can be performed on according to PMD and device capabilities.
*/
-#define ETH_RSS_LEVEL_PMD_DEFAULT (0ULL << 50)
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (0ULL << 50)
+#define ETH_RSS_LEVEL_PMD_DEFAULT RTE_ETH_RSS_LEVEL_PMD_DEFAULT
/**
* level 1, requests RSS to be performed on the outermost packet
* encapsulation level.
*/
-#define ETH_RSS_LEVEL_OUTERMOST (1ULL << 50)
+#define RTE_ETH_RSS_LEVEL_OUTERMOST (1ULL << 50)
+#define ETH_RSS_LEVEL_OUTERMOST RTE_ETH_RSS_LEVEL_OUTERMOST
/**
* level 2, requests RSS to be performed on the specified inner packet
* encapsulation level, from outermost to innermost (lower to higher values).
*/
-#define ETH_RSS_LEVEL_INNERMOST (2ULL << 50)
-#define ETH_RSS_LEVEL_MASK (3ULL << 50)
+#define RTE_ETH_RSS_LEVEL_INNERMOST (2ULL << 50)
+#define ETH_RSS_LEVEL_INNERMOST RTE_ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK (3ULL << 50)
+#define ETH_RSS_LEVEL_MASK RTE_ETH_RSS_LEVEL_MASK
-#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
+#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
+#define ETH_RSS_LEVEL(rss_hf) RTE_ETH_RSS_LEVEL(rss_hf)
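A short sketch (editor's addition, not part of the patch) of how the level macros compose with the hash types; the values shown follow from the definitions above.

	/* Hash on the inner headers of tunnelled (e.g. VXLAN) traffic. */
	uint64_t rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_VXLAN |
			  RTE_ETH_RSS_LEVEL_INNERMOST;

	uint64_t level = RTE_ETH_RSS_LEVEL(rss_hf); /* == 2, the innermost level */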
/**
* For input set change of hash filter, if SRC_ONLY and DST_ONLY of
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
- if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
- rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+ if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
+ rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
- if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
- rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+ if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
+ rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
return rss_hf;
}
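A worked example (editor's sketch, not part of the patch) of the refine rule implemented above:

	uint64_t rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;

	rss_hf = rte_eth_rss_hf_refine(rss_hf);
	/* The two L3 *_ONLY bits conflict, so both are cleared and the hash
	 * covers source and destination addresses again:
	 * rss_hf == RTE_ETH_RSS_NONFRAG_IPV4_TCP */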
-#define ETH_RSS_IPV6_PRE32 ( \
- ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE32 ( \
+ RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32 RTE_ETH_RSS_IPV6_PRE32
-#define ETH_RSS_IPV6_PRE40 ( \
- ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE40 ( \
+ RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40 RTE_ETH_RSS_IPV6_PRE40
-#define ETH_RSS_IPV6_PRE48 ( \
- ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE48 ( \
+ RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48 RTE_ETH_RSS_IPV6_PRE48
-#define ETH_RSS_IPV6_PRE56 ( \
- ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE56 ( \
+ RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56 RTE_ETH_RSS_IPV6_PRE56
-#define ETH_RSS_IPV6_PRE64 ( \
- ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE64 ( \
+ RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64 RTE_ETH_RSS_IPV6_PRE64
-#define ETH_RSS_IPV6_PRE96 ( \
- ETH_RSS_IPV6 | \
+#define RTE_ETH_RSS_IPV6_PRE96 ( \
+ RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96 RTE_ETH_RSS_IPV6_PRE96
-#define ETH_RSS_IPV6_PRE32_UDP ( \
- ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_UDP RTE_ETH_RSS_IPV6_PRE32_UDP
-#define ETH_RSS_IPV6_PRE40_UDP ( \
- ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_UDP RTE_ETH_RSS_IPV6_PRE40_UDP
-#define ETH_RSS_IPV6_PRE48_UDP ( \
- ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_UDP RTE_ETH_RSS_IPV6_PRE48_UDP
-#define ETH_RSS_IPV6_PRE56_UDP ( \
- ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_UDP RTE_ETH_RSS_IPV6_PRE56_UDP
-#define ETH_RSS_IPV6_PRE64_UDP ( \
- ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_UDP RTE_ETH_RSS_IPV6_PRE64_UDP
-#define ETH_RSS_IPV6_PRE96_UDP ( \
- ETH_RSS_NONFRAG_IPV6_UDP | \
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_UDP RTE_ETH_RSS_IPV6_PRE96_UDP
-#define ETH_RSS_IPV6_PRE32_TCP ( \
- ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_TCP RTE_ETH_RSS_IPV6_PRE32_TCP
-#define ETH_RSS_IPV6_PRE40_TCP ( \
- ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_TCP RTE_ETH_RSS_IPV6_PRE40_TCP
-#define ETH_RSS_IPV6_PRE48_TCP ( \
- ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_TCP RTE_ETH_RSS_IPV6_PRE48_TCP
-#define ETH_RSS_IPV6_PRE56_TCP ( \
- ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_TCP RTE_ETH_RSS_IPV6_PRE56_TCP
-#define ETH_RSS_IPV6_PRE64_TCP ( \
- ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_TCP RTE_ETH_RSS_IPV6_PRE64_TCP
-#define ETH_RSS_IPV6_PRE96_TCP ( \
- ETH_RSS_NONFRAG_IPV6_TCP | \
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_L3_PRE96)
+#define ETH_RSS_IPV6_PRE96_TCP RTE_ETH_RSS_IPV6_PRE96_TCP
-#define ETH_RSS_IPV6_PRE32_SCTP ( \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
RTE_ETH_RSS_L3_PRE32)
+#define ETH_RSS_IPV6_PRE32_SCTP RTE_ETH_RSS_IPV6_PRE32_SCTP
-#define ETH_RSS_IPV6_PRE40_SCTP ( \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
RTE_ETH_RSS_L3_PRE40)
+#define ETH_RSS_IPV6_PRE40_SCTP RTE_ETH_RSS_IPV6_PRE40_SCTP
-#define ETH_RSS_IPV6_PRE48_SCTP ( \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
RTE_ETH_RSS_L3_PRE48)
+#define ETH_RSS_IPV6_PRE48_SCTP RTE_ETH_RSS_IPV6_PRE48_SCTP
-#define ETH_RSS_IPV6_PRE56_SCTP ( \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
RTE_ETH_RSS_L3_PRE56)
+#define ETH_RSS_IPV6_PRE56_SCTP RTE_ETH_RSS_IPV6_PRE56_SCTP
-#define ETH_RSS_IPV6_PRE64_SCTP ( \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
RTE_ETH_RSS_L3_PRE64)
+#define ETH_RSS_IPV6_PRE64_SCTP RTE_ETH_RSS_IPV6_PRE64_SCTP
-#define ETH_RSS_IPV6_PRE96_SCTP ( \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
RTE_ETH_RSS_L3_PRE96)
-
-#define ETH_RSS_IP ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_IPV6_EX)
-
-#define ETH_RSS_UDP ( \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
-
-#define ETH_RSS_TCP ( \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX)
-
-#define ETH_RSS_SCTP ( \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
-
-#define ETH_RSS_TUNNEL ( \
- ETH_RSS_VXLAN | \
- ETH_RSS_GENEVE | \
- ETH_RSS_NVGRE)
-
-#define ETH_RSS_VLAN ( \
- ETH_RSS_S_VLAN | \
- ETH_RSS_C_VLAN)
+#define ETH_RSS_IPV6_PRE96_SCTP RTE_ETH_RSS_IPV6_PRE96_SCTP
+
+#define RTE_ETH_RSS_IP ( \
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_IPV6_EX)
+#define ETH_RSS_IP RTE_ETH_RSS_IP
+
+#define RTE_ETH_RSS_UDP ( \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_IPV6_UDP_EX)
+#define ETH_RSS_UDP RTE_ETH_RSS_UDP
+
+#define RTE_ETH_RSS_TCP ( \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_IPV6_TCP_EX)
+#define ETH_RSS_TCP RTE_ETH_RSS_TCP
+
+#define RTE_ETH_RSS_SCTP ( \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
+
+#define RTE_ETH_RSS_TUNNEL ( \
+ RTE_ETH_RSS_VXLAN | \
+ RTE_ETH_RSS_GENEVE | \
+ RTE_ETH_RSS_NVGRE)
+#define ETH_RSS_TUNNEL RTE_ETH_RSS_TUNNEL
+
+#define RTE_ETH_RSS_VLAN ( \
+ RTE_ETH_RSS_S_VLAN | \
+ RTE_ETH_RSS_C_VLAN)
+#define ETH_RSS_VLAN RTE_ETH_RSS_VLAN
/** Mask of valid RSS hash protocols */
-#define ETH_RSS_PROTO_MASK ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_NONFRAG_IPV4_OTHER | \
- ETH_RSS_IPV6 | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP | \
- ETH_RSS_NONFRAG_IPV6_OTHER | \
- ETH_RSS_L2_PAYLOAD | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX | \
- ETH_RSS_PORT | \
- ETH_RSS_VXLAN | \
- ETH_RSS_GENEVE | \
- ETH_RSS_NVGRE | \
- ETH_RSS_MPLS)
+#define RTE_ETH_RSS_PROTO_MASK ( \
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L2_PAYLOAD | \
+ RTE_ETH_RSS_IPV6_EX | \
+ RTE_ETH_RSS_IPV6_TCP_EX | \
+ RTE_ETH_RSS_IPV6_UDP_EX | \
+ RTE_ETH_RSS_PORT | \
+ RTE_ETH_RSS_VXLAN | \
+ RTE_ETH_RSS_GENEVE | \
+ RTE_ETH_RSS_NVGRE | \
+ RTE_ETH_RSS_MPLS)
+#define ETH_RSS_PROTO_MASK RTE_ETH_RSS_PROTO_MASK
/*
* Definitions used for redirection table entry size.
* Some RSS RETA sizes may not be supported by some drivers, check the
* documentation or the description of relevant functions for more details.
*/
-#define ETH_RSS_RETA_SIZE_64 64
-#define ETH_RSS_RETA_SIZE_128 128
-#define ETH_RSS_RETA_SIZE_256 256
-#define ETH_RSS_RETA_SIZE_512 512
-#define RTE_RETA_GROUP_SIZE 64
+#define RTE_ETH_RSS_RETA_SIZE_64 64
+#define ETH_RSS_RETA_SIZE_64 RTE_ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_128 RTE_ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_256 256
+#define ETH_RSS_RETA_SIZE_256 RTE_ETH_RSS_RETA_SIZE_256
+#define RTE_ETH_RSS_RETA_SIZE_512 512
+#define ETH_RSS_RETA_SIZE_512 RTE_ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE 64
+#define RTE_RETA_GROUP_SIZE RTE_ETH_RETA_GROUP_SIZE
/**@{@name VMDq and DCB maximums */
-#define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDq VLAN filters. */
-#define ETH_DCB_NUM_USER_PRIORITIES 8 /**< Maximum nb. of DCB priorities. */
-#define ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Maximum nb. of VMDq DCB queues. */
-#define ETH_DCB_NUM_QUEUES 128 /**< Maximum nb. of DCB queues. */
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDq VLAN filters. */
+#define ETH_VMDQ_MAX_VLAN_FILTERS RTE_ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8 /**< Maximum nb. of DCB priorities. */
+#define ETH_DCB_NUM_USER_PRIORITIES RTE_ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Maximum nb. of VMDq DCB queues. */
+#define ETH_VMDQ_DCB_NUM_QUEUES RTE_ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES 128 /**< Maximum nb. of DCB queues. */
+#define ETH_DCB_NUM_QUEUES RTE_ETH_DCB_NUM_QUEUES
/**@}*/
/**@{@name DCB capabilities */
-#define ETH_DCB_PG_SUPPORT 0x00000001 /**< Priority Group(ETS) support. */
-#define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority Flow Control support. */
+#define RTE_ETH_DCB_PG_SUPPORT 0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PG_SUPPORT RTE_ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority Flow Control support. */
+#define ETH_DCB_PFC_SUPPORT RTE_ETH_DCB_PFC_SUPPORT
/**@}*/
/**@{@name VLAN offload bits */
-#define ETH_VLAN_STRIP_OFFLOAD 0x0001 /**< VLAN Strip On/Off */
-#define ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN Filter On/Off */
-#define ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN Extend On/Off */
-#define ETH_QINQ_STRIP_OFFLOAD 0x0008 /**< QINQ Strip On/Off */
-
-#define ETH_VLAN_STRIP_MASK 0x0001 /**< VLAN Strip setting mask */
-#define ETH_VLAN_FILTER_MASK 0x0002 /**< VLAN Filter setting mask*/
-#define ETH_VLAN_EXTEND_MASK 0x0004 /**< VLAN Extend setting mask*/
-#define ETH_QINQ_STRIP_MASK 0x0008 /**< QINQ Strip setting mask */
-#define ETH_VLAN_ID_MAX 0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001 /**< VLAN Strip On/Off */
+#define ETH_VLAN_STRIP_OFFLOAD RTE_ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD RTE_ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN Extend On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD RTE_ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008 /**< QINQ Strip On/Off */
+#define ETH_QINQ_STRIP_OFFLOAD RTE_ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK 0x0001 /**< VLAN Strip setting mask */
+#define ETH_VLAN_STRIP_MASK RTE_ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK 0x0002 /**< VLAN Filter setting mask*/
+#define ETH_VLAN_FILTER_MASK RTE_ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK 0x0004 /**< VLAN Extend setting mask*/
+#define ETH_VLAN_EXTEND_MASK RTE_ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK 0x0008 /**< QINQ Strip setting mask */
+#define ETH_QINQ_STRIP_MASK RTE_ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX 0x0FFF /**< VLAN ID is in lower 12 bits*/
+#define ETH_VLAN_ID_MAX RTE_ETH_VLAN_ID_MAX
/**@}*/
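Illustrative usage of these offload masks (editor's sketch, not part of the patch); port_id is assumed valid and return values are not checked.

	/* Enable VLAN stripping and filtering at runtime. */
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
	rte_eth_dev_set_vlan_offload(port_id, mask);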
/* Definitions used for receive MAC address */
-#define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Maximum nb. of receive mac addr. */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Maximum nb. of receive mac addr. */
+#define ETH_NUM_RECEIVE_MAC_ADDR RTE_ETH_NUM_RECEIVE_MAC_ADDR
/* Definitions used for unicast hash */
-#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
/**@{@name VMDq Rx mode
* @see rte_eth_vmdq_rx_conf.rx_mode
*/
-#define ETH_VMDQ_ACCEPT_UNTAG 0x0001 /**< accept untagged packets. */
-#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
-#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
-#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< accept broadcast packets. */
-#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< multicast promiscuous. */
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG 0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_UNTAG RTE_ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table . */
+#define ETH_VMDQ_ACCEPT_HASH_MC RTE_ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC RTE_ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_BROADCAST RTE_ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< multicast promiscuous. */
+#define ETH_VMDQ_ACCEPT_MULTICAST RTE_ETH_VMDQ_ACCEPT_MULTICAST
/**@}*/
/**
/** Mask bits indicate which entries need to be updated/queried. */
uint64_t mask;
/** Group of 64 redirection table entries. */
- uint16_t reta[RTE_RETA_GROUP_SIZE];
+ uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
};
/**
* in DCB configurations
*/
enum rte_eth_nb_tcs {
- ETH_4_TCS = 4, /**< 4 TCs with DCB. */
- ETH_8_TCS = 8 /**< 8 TCs with DCB. */
+ RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+ RTE_ETH_8_TCS = 8 /**< 8 TCs with DCB. */
};
+#define ETH_4_TCS RTE_ETH_4_TCS
+#define ETH_8_TCS RTE_ETH_8_TCS
/**
* This enum indicates the possible number of queue pools
* in VMDq configurations.
*/
enum rte_eth_nb_pools {
- ETH_8_POOLS = 8, /**< 8 VMDq pools. */
- ETH_16_POOLS = 16, /**< 16 VMDq pools. */
- ETH_32_POOLS = 32, /**< 32 VMDq pools. */
- ETH_64_POOLS = 64 /**< 64 VMDq pools. */
+ RTE_ETH_8_POOLS = 8, /**< 8 VMDq pools. */
+ RTE_ETH_16_POOLS = 16, /**< 16 VMDq pools. */
+ RTE_ETH_32_POOLS = 32, /**< 32 VMDq pools. */
+ RTE_ETH_64_POOLS = 64 /**< 64 VMDq pools. */
};
+#define ETH_8_POOLS RTE_ETH_8_POOLS
+#define ETH_16_POOLS RTE_ETH_16_POOLS
+#define ETH_32_POOLS RTE_ETH_32_POOLS
+#define ETH_64_POOLS RTE_ETH_64_POOLS
/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
/** Traffic class each UP mapped to. */
- uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+ uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_dcb_tx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
/** Traffic class each UP mapped to. */
- uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+ uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_dcb_tx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
/** Traffic class each UP mapped to. */
- uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+ uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_tx_conf {
struct {
uint16_t vlan_id; /**< The VLAN ID of the received frame */
uint64_t pools; /**< Bitmask of pools for packet Rx */
- } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
+ } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
/** Selects a queue in a pool */
- uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
+ uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
/**
* Using this feature, packets are routed to a pool of queues. By default,
* the pool selection is based on the MAC address, the VLAN ID in the
* VLAN tag as specified in the pool_map array.
- * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
* selection using only the MAC address. MAC address to pool mapping is done
* using the rte_eth_dev_mac_addr_add function, with the pool parameter
* corresponding to the pool ID.
struct {
uint16_t vlan_id; /**< The VLAN ID of the received frame */
uint64_t pools; /**< Bitmask of pools for packet Rx */
- } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
+ } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
};
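An illustrative sketch (not part of the patch) of the MAC-to-pool mapping described in the comment above; the address value, pool number, and port_id are arbitrary examples.

	/* Steer frames with this (locally administered) MAC address to VMDq pool 3.
	 * Valid pool IDs are bounded by the configured number of pools
	 * (at most RTE_ETH_64_POOLS). */
	struct rte_ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	rte_eth_dev_mac_addr_add(port_id, &mac, 3);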
/**
struct rte_eth_txmode {
enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
/**
- * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+ * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
* Only offloads set on tx_offload_capa field on rte_eth_dev_info
* structure are allowed to be set.
*/
uint16_t share_group;
uint16_t share_qid; /**< Shared Rx queue ID in group */
/**
- * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+ * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
* Only offloads set on rx_queue_offload_capa or rx_offload_capa
* fields on rte_eth_dev_info structure are allowed to be set.
*/
uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
/**
- * Per-queue Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+ * Per-queue Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
* Only offloads set on tx_queue_offload_capa or tx_offload_capa
* fields on rte_eth_dev_info structure are allowed to be set.
*/
* This enum indicates the flow control mode
*/
enum rte_eth_fc_mode {
- RTE_FC_NONE = 0, /**< Disable flow control. */
- RTE_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
- RTE_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
- RTE_FC_FULL /**< Enable flow control on both side. */
+ RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
+ RTE_ETH_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
+ RTE_ETH_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
+ RTE_ETH_FC_FULL /**< Enable flow control on both side. */
};
+#define RTE_FC_NONE RTE_ETH_FC_NONE
+#define RTE_FC_RX_PAUSE RTE_ETH_FC_RX_PAUSE
+#define RTE_FC_TX_PAUSE RTE_ETH_FC_TX_PAUSE
+#define RTE_FC_FULL RTE_ETH_FC_FULL
+
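As a minimal sketch of the renamed enumerators in use (the port ID, pause time and autoneg setting below are illustrative, not a recommendation):

struct rte_eth_fc_conf fc_conf = {
	.mode = RTE_ETH_FC_FULL,   /* generate and honour pause frames */
	.pause_time = 0xffff,      /* example pause quanta */
	.autoneg = 1,
};
int ret = rte_eth_dev_flow_ctrl_set(0, &fc_conf);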
/**
* A structure used to configure Ethernet flow control parameter.
* These parameters will be configured into the register of the NIC.
* @see rte_eth_udp_tunnel
*/
enum rte_eth_tunnel_type {
- RTE_TUNNEL_TYPE_NONE = 0,
- RTE_TUNNEL_TYPE_VXLAN,
- RTE_TUNNEL_TYPE_GENEVE,
- RTE_TUNNEL_TYPE_TEREDO,
- RTE_TUNNEL_TYPE_NVGRE,
- RTE_TUNNEL_TYPE_IP_IN_GRE,
- RTE_L2_TUNNEL_TYPE_E_TAG,
- RTE_TUNNEL_TYPE_VXLAN_GPE,
- RTE_TUNNEL_TYPE_ECPRI,
- RTE_TUNNEL_TYPE_MAX,
+ RTE_ETH_TUNNEL_TYPE_NONE = 0,
+ RTE_ETH_TUNNEL_TYPE_VXLAN,
+ RTE_ETH_TUNNEL_TYPE_GENEVE,
+ RTE_ETH_TUNNEL_TYPE_TEREDO,
+ RTE_ETH_TUNNEL_TYPE_NVGRE,
+ RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
+ RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
+ RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
+ RTE_ETH_TUNNEL_TYPE_ECPRI,
+ RTE_ETH_TUNNEL_TYPE_MAX,
};
+#define RTE_TUNNEL_TYPE_NONE RTE_ETH_TUNNEL_TYPE_NONE
+#define RTE_TUNNEL_TYPE_VXLAN RTE_ETH_TUNNEL_TYPE_VXLAN
+#define RTE_TUNNEL_TYPE_GENEVE RTE_ETH_TUNNEL_TYPE_GENEVE
+#define RTE_TUNNEL_TYPE_TEREDO RTE_ETH_TUNNEL_TYPE_TEREDO
+#define RTE_TUNNEL_TYPE_NVGRE RTE_ETH_TUNNEL_TYPE_NVGRE
+#define RTE_TUNNEL_TYPE_IP_IN_GRE RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_L2_TUNNEL_TYPE_E_TAG RTE_ETH_L2_TUNNEL_TYPE_E_TAG
+#define RTE_TUNNEL_TYPE_VXLAN_GPE RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
+#define RTE_TUNNEL_TYPE_ECPRI RTE_ETH_TUNNEL_TYPE_ECPRI
+#define RTE_TUNNEL_TYPE_MAX RTE_ETH_TUNNEL_TYPE_MAX
+
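A hedged example of the renamed tunnel types in use, assuming an already started port 0; 4789 is simply the IANA-assigned VXLAN port:

struct rte_eth_udp_tunnel tunnel_udp = {
	.udp_port = 4789,                         /* IANA VXLAN UDP port */
	.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
};
int ret = rte_eth_dev_udp_tunnel_port_add(0, &tunnel_udp);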
/* Deprecated API file for rte_eth_dev_filter_* functions */
#include "rte_eth_ctrl.h"
* Memory space that can be configured to store Flow Director filters
* in the board memory.
*/
-enum rte_fdir_pballoc_type {
- RTE_FDIR_PBALLOC_64K = 0, /**< 64k. */
- RTE_FDIR_PBALLOC_128K, /**< 128k. */
- RTE_FDIR_PBALLOC_256K, /**< 256k. */
+enum rte_eth_fdir_pballoc_type {
+ RTE_ETH_FDIR_PBALLOC_64K = 0, /**< 64k. */
+ RTE_ETH_FDIR_PBALLOC_128K, /**< 128k. */
+ RTE_ETH_FDIR_PBALLOC_256K, /**< 256k. */
};
+#define rte_fdir_pballoc_type rte_eth_fdir_pballoc_type
+
+#define RTE_FDIR_PBALLOC_64K RTE_ETH_FDIR_PBALLOC_64K
+#define RTE_FDIR_PBALLOC_128K RTE_ETH_FDIR_PBALLOC_128K
+#define RTE_FDIR_PBALLOC_256K RTE_ETH_FDIR_PBALLOC_256K
/**
* Select report mode of FDIR hash information in Rx descriptors.
*
* If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
*/
-struct rte_fdir_conf {
+struct rte_eth_fdir_conf {
enum rte_fdir_mode mode; /**< Flow Director mode. */
- enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+ enum rte_eth_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
enum rte_fdir_status_mode status; /**< How to report FDIR hash. */
/** Rx queue of packets matching a "drop" filter in perfect mode. */
uint8_t drop_queue;
struct rte_eth_fdir_flex_conf flex_conf;
};
+#define rte_fdir_conf rte_eth_fdir_conf
+
/**
* UDP tunneling configuration.
*
/**
* A structure used to enable/disable specific device interrupts.
*/
-struct rte_intr_conf {
+struct rte_eth_intr_conf {
/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
uint32_t lsc:1;
/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
uint32_t rmv:1;
};
+#define rte_intr_conf rte_eth_intr_conf
+
/**
* A structure used to configure an Ethernet port.
* Depending upon the Rx multi-queue mode, extra advanced
* configuration settings may be needed.
*/
struct rte_eth_conf {
- uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
- used. ETH_LINK_SPEED_FIXED disables link
+ uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
+ used. RTE_ETH_LINK_SPEED_FIXED disables link
autonegotiation, and a unique speed shall be
set. Otherwise, the bitmap defines the set of
speeds to be advertised. If the special value
- ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
+ RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
supported are advertised. */
struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
struct rte_eth_txmode txmode; /**< Port Tx configuration. */
struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
} tx_adv_conf; /**< Port Tx DCB configuration (union). */
/** Currently, Priority Flow Control (PFC) is supported; if DCB with PFC
- is needed,and the variable must be set ETH_DCB_PFC_SUPPORT. */
+ is needed, the variable must be set to RTE_ETH_DCB_PFC_SUPPORT. */
uint32_t dcb_capability_en;
- struct rte_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
- struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+ struct rte_eth_fdir_conf fdir_conf; /**< FDIR configuration. DEPRECATED */
+ struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
};
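A minimal sketch of the renamed fields of struct rte_eth_conf being filled before rte_eth_dev_configure(); the fixed 10G speed is only an example:

struct rte_eth_conf conf = { 0 };
/* Force a single 10G speed instead of autonegotiation (example only). */
conf.link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_FIXED;
/* Request link-state-change interrupts via the renamed rte_eth_intr_conf. */
conf.intr_conf.lsc = 1;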
/**
* Rx offload capabilities of a device.
*/
-#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
-#define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
-#define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
-#define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
-#define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
-#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
-#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
-#define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
-#define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
-#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
-#define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
-#define DEV_RX_OFFLOAD_SCATTER 0x00002000
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP 0x00000001
+#define DEV_RX_OFFLOAD_VLAN_STRIP RTE_ETH_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM 0x00000002
+#define DEV_RX_OFFLOAD_IPV4_CKSUM RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM 0x00000004
+#define DEV_RX_OFFLOAD_UDP_CKSUM RTE_ETH_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM 0x00000008
+#define DEV_RX_OFFLOAD_TCP_CKSUM RTE_ETH_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO 0x00000010
+#define DEV_RX_OFFLOAD_TCP_LRO RTE_ETH_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP 0x00000020
+#define DEV_RX_OFFLOAD_QINQ_STRIP RTE_ETH_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP 0x00000080
+#define DEV_RX_OFFLOAD_MACSEC_STRIP RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT 0x00000100
+#define DEV_RX_OFFLOAD_HEADER_SPLIT RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER 0x00000200
+#define DEV_RX_OFFLOAD_VLAN_FILTER RTE_ETH_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND 0x00000400
+#define DEV_RX_OFFLOAD_VLAN_EXTEND RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER 0x00002000
+#define DEV_RX_OFFLOAD_SCATTER RTE_ETH_RX_OFFLOAD_SCATTER
/**
* Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
* and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
* The mbuf field and flag are registered when the offload is configured.
*/
-#define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
-#define DEV_RX_OFFLOAD_SECURITY 0x00008000
-#define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
-#define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
-#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
-#define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
-#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
-
-#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM)
-#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_VLAN_EXTEND | \
- DEV_RX_OFFLOAD_QINQ_STRIP)
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP 0x00004000
+#define DEV_RX_OFFLOAD_TIMESTAMP RTE_ETH_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY 0x00008000
+#define DEV_RX_OFFLOAD_SECURITY RTE_ETH_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC 0x00010000
+#define DEV_RX_OFFLOAD_KEEP_CRC RTE_ETH_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM 0x00020000
+#define DEV_RX_OFFLOAD_SCTP_CKSUM RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
+#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH 0x00080000
+#define DEV_RX_OFFLOAD_RSS_HASH RTE_ETH_RX_OFFLOAD_RSS_HASH
+#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
+
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_CHECKSUM RTE_ETH_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+#define DEV_RX_OFFLOAD_VLAN RTE_ETH_RX_OFFLOAD_VLAN
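A hedged sketch of validating a requested Rx offload set against the renamed capability field before configuring a port; port 0 and the requested bits are illustrative:

struct rte_eth_dev_info dev_info;
uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (rte_eth_dev_info_get(0, &dev_info) == 0)
	rx_offloads &= dev_info.rx_offload_capa;  /* keep only what the PMD supports */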
/*
* If new Rx offload capabilities are defined, they also must be
/**
* Tx offload capabilities of a device.
*/
-#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
-#define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
-#define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
-#define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
-#define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
-#define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
-#define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
-#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
-#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */
-#define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_VLAN_INSERT RTE_ETH_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM 0x00000002
+#define DEV_TX_OFFLOAD_IPV4_CKSUM RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM 0x00000004
+#define DEV_TX_OFFLOAD_UDP_CKSUM RTE_ETH_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM 0x00000008
+#define DEV_TX_OFFLOAD_TCP_CKSUM RTE_ETH_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM 0x00000010
+#define DEV_TX_OFFLOAD_SCTP_CKSUM RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO 0x00000020
+#define DEV_TX_OFFLOAD_TCP_TSO RTE_ETH_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO 0x00000040
+#define DEV_TX_OFFLOAD_UDP_TSO RTE_ETH_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT 0x00000100
+#define DEV_TX_OFFLOAD_QINQ_INSERT RTE_ETH_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO 0x00000400 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GRE_TNL_TSO RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_IPIP_TNL_TSO RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT 0x00002000
+#define DEV_TX_OFFLOAD_MACSEC_INSERT RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
/**
* Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
* Tx queue without SW lock.
*/
-#define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE 0x00004000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
/** Device supports multi segment send. */
-#define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS 0x00008000
+#define DEV_TX_OFFLOAD_MULTI_SEGS RTE_ETH_TX_OFFLOAD_MULTI_SEGS
/**
* Device supports optimization for fast release of mbufs.
 * When set, the application must guarantee that, per queue, all mbufs come from
 * the same mempool and have refcnt = 1.
*/
-#define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
-#define DEV_TX_OFFLOAD_SECURITY 0x00020000
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY 0x00020000
+#define DEV_TX_OFFLOAD_SECURITY RTE_ETH_TX_OFFLOAD_SECURITY
/**
* Device supports generic UDP tunneled packet TSO.
* Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
* for tunnel TSO.
*/
-#define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
/**
* Device supports generic IP tunneled packet TSO.
* Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
* for tunnel TSO.
*/
-#define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO 0x00080000
+#define DEV_TX_OFFLOAD_IP_TNL_TSO RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
/** Device supports outer UDP checksum */
-#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
+#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
/**
* Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
* if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
* The mbuf field and flag are registered when the offload is configured.
*/
-#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
+#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
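Because every legacy DEV_TX_OFFLOAD_* name is kept as an alias of its RTE_ETH_TX_OFFLOAD_* replacement, existing applications keep compiling unchanged; code that must also build against an older DPDK release can add the reverse mapping itself, as in the hedged shim below (shown for one flag only):

#ifndef RTE_ETH_TX_OFFLOAD_MULTI_SEGS
/* Building against a DPDK release that predates the RTE_ETH_ prefix. */
#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS DEV_TX_OFFLOAD_MULTI_SEGS
#endif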
/*
* If new Tx offload capabilities are defined, they also must be
* mentioned in rte_tx_offload_names in rte_ethdev.c file.
uint16_t vmdq_pool_base; /**< First ID of VMDq pools. */
struct rte_eth_desc_lim rx_desc_lim; /**< Rx descriptors limits */
struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptors limits */
- uint32_t speed_capa; /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+ uint32_t speed_capa; /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
/** Configured number of Rx/Tx queues */
uint16_t nb_rx_queues; /**< Number of Rx queues. */
uint16_t nb_tx_queues; /**< Number of Tx queues. */
char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
};
-#define ETH_DCB_NUM_TCS 8
-#define ETH_MAX_VMDQ_POOL 64
+#define RTE_ETH_DCB_NUM_TCS 8
+#define ETH_DCB_NUM_TCS RTE_ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL 64
+#define ETH_MAX_VMDQ_POOL RTE_ETH_MAX_VMDQ_POOL
/**
* A structure used to get the information of queue and
struct {
uint16_t base;
uint16_t nb_queue;
- } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+ } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
/** Rx queues assigned to tc per Pool */
struct {
uint16_t base;
uint16_t nb_queue;
- } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+ } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
};
/**
*/
struct rte_eth_dcb_info {
uint8_t nb_tcs; /**< number of TCs */
- uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
- uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
+ uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+ uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
/** Rx queues assigned to tc */
struct rte_eth_dcb_tc_queue_mapping tc_queue;
};
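A small sketch of reading the renamed DCB arrays back from a device; port 0 is illustrative:

struct rte_eth_dcb_info dcb_info;
if (rte_eth_dev_get_dcb_info(0, &dcb_info) == 0) {
	for (int up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++)
		printf("UP %d -> TC %d\n", up, dcb_info.prio_tc[up]);
}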
/* A structure used to get capabilities per link speed */
struct rte_eth_fec_capa {
- uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
+ uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
uint32_t capa; /**< FEC capabilities bitmask */
};
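A hedged sketch of querying those per-speed FEC capabilities; the array size is arbitrary, and the API may still be marked experimental in some releases:

struct rte_eth_fec_capa fec_capa[8];
int num = rte_eth_fec_get_capability(0, fec_capa, RTE_DIM(fec_capa));
if (num > 0)
	printf("speed %u Mbps, FEC capa 0x%x\n",
	       (unsigned int)fec_capa[0].speed,
	       (unsigned int)fec_capa[0].capa);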
* @param speed
* Numerical speed value in Mbps
* @param duplex
- * ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
+ * RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
* @return
* 0 if the speed cannot be mapped
*/
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
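For example (values illustrative), 100 Mbps half duplex maps to the RTE_ETH_LINK_SPEED_100M_HD bit:

uint32_t speed_bit = rte_eth_speed_bitflag(100, RTE_ETH_LINK_HALF_DUPLEX);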
/**
- * Get DEV_RX_OFFLOAD_* flag name.
+ * Get RTE_ETH_RX_OFFLOAD_* flag name.
*
* @param offload
* Offload flag.
const char *rte_eth_dev_rx_offload_name(uint64_t offload);
/**
- * Get DEV_TX_OFFLOAD_* flag name.
+ * Get RTE_ETH_TX_OFFLOAD_* flag name.
*
* @param offload
* Offload flag.
* of the Prefetch, Host, and Write-Back threshold registers of the receive
* ring.
 * In addition it contains the hardware offload features to activate using
- * the DEV_RX_OFFLOAD_* flags.
+ * the RTE_ETH_RX_OFFLOAD_* flags.
* If an offloading set in rx_conf->offloads
* hasn't been set in the input argument eth_conf->rxmode.offloads
* to rte_eth_dev_configure(), it is a new added offloading, it must be
*
* @param str
* A pointer to a string to be filled with textual representation of
- * device status. At least ETH_LINK_MAX_STR_LEN bytes should be allocated to
+ * device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
* store default link status text.
* @param len
* Length of available memory at 'str' string.
* The port identifier of the Ethernet device.
* @param offload_mask
* The VLAN Offload bit mask can be mixed use with "OR"
- * ETH_VLAN_STRIP_OFFLOAD
- * ETH_VLAN_FILTER_OFFLOAD
- * ETH_VLAN_EXTEND_OFFLOAD
- * ETH_QINQ_STRIP_OFFLOAD
+ * RTE_ETH_VLAN_STRIP_OFFLOAD
+ * RTE_ETH_VLAN_FILTER_OFFLOAD
+ * RTE_ETH_VLAN_EXTEND_OFFLOAD
+ * RTE_ETH_QINQ_STRIP_OFFLOAD
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
* The port identifier of the Ethernet device.
* @return
* - (>0) if successful. Bit mask to indicate
- * ETH_VLAN_STRIP_OFFLOAD
- * ETH_VLAN_FILTER_OFFLOAD
- * ETH_VLAN_EXTEND_OFFLOAD
- * ETH_QINQ_STRIP_OFFLOAD
+ * RTE_ETH_VLAN_STRIP_OFFLOAD
+ * RTE_ETH_VLAN_FILTER_OFFLOAD
+ * RTE_ETH_VLAN_EXTEND_OFFLOAD
+ * RTE_ETH_QINQ_STRIP_OFFLOAD
* - (-ENODEV) if *port_id* invalid.
*/
int rte_eth_dev_get_vlan_offload(uint16_t port_id);
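A minimal sketch combining the two calls with the renamed mask bits; port 0 is illustrative:

int mask = rte_eth_dev_get_vlan_offload(0);
if (mask >= 0) {
	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
	int ret = rte_eth_dev_set_vlan_offload(0, mask);
	if (ret != 0)
		printf("rte_eth_dev_set_vlan_offload() failed: %d\n", ret);
}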
* rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
* of those packets whose transmission was effectively completed.
*
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
* invoke this function concurrently on the same Tx queue without SW lock.
* @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
*
* through.
*/
uint32_t level;
- uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
uint32_t key_len; /**< Hash key length in bytes. */
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */
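A hedged sketch of an rte_flow RSS action using the renamed hash-type macros; queue IDs and hash types are illustrative:

uint16_t rss_queues[] = { 0, 1 };
struct rte_flow_action_rss rss = {
	.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,  /* hash on IP and TCP fields */
	.queue_num = 2,
	.queue = rss_queues,
};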
#include "gso_udp4.h"
#define ILLEGAL_UDP_GSO_CTX(ctx) \
- ((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+ ((((ctx)->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0) || \
(ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
#define ILLEGAL_TCP_GSO_CTX(ctx) \
- ((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
- DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+ ((((ctx)->gso_types & (RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
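A minimal sketch of a GSO context that passes these checks for plain TCP/IPv4 traffic; pool below stands for an mbuf mempool the application is assumed to have created already, and the segment size is only an example:

struct rte_gso_ctx gso_ctx = {
	.direct_pool = pool,        /* assumption: an existing mbuf mempool */
	.indirect_pool = pool,      /* assumption: may also be a separate pool */
	.gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO,
	.gso_size = 1400,           /* example output segment size */
};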
int
ol_flags = pkt->ol_flags;
if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
- (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+ (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
- (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+ (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))) {
pkt->ol_flags &= (~PKT_TX_TCP_SEG);
ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
- (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
- (gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+ (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
+ (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
pkt->ol_flags &= (~PKT_TX_UDP_SEG);
ret = gso_tunnel_udp4_segment(pkt, gso_size,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
} else if (IS_IPV4_TCP(pkt->ol_flags) &&
- (gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+ (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
pkt->ol_flags &= (~PKT_TX_TCP_SEG);
ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
} else if (IS_IPV4_UDP(pkt->ol_flags) &&
- (gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+ (gso_ctx->gso_types & RTE_ETH_TX_OFFLOAD_UDP_TSO)) {
pkt->ol_flags &= (~PKT_TX_UDP_SEG);
ret = gso_udp4_segment(pkt, gso_size, direct_pool,
indirect_pool, pkts_out, nb_pkts_out);
uint32_t gso_types;
/**< the bit mask of required GSO types. The GSO library
 * uses the same macros as those that describe device TX
- * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+ * offloading capabilities (i.e. RTE_ETH_TX_OFFLOAD_*_TSO) for
* gso_types.
*
* For example, if applications want to segment TCP/IPv4
- * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+ * packets, set RTE_ETH_TX_OFFLOAD_TCP_TSO in gso_types.
*/
uint16_t gso_size;
/**< maximum size of an output GSO segment, including packet
* The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
 * HW capability. At a minimum, the PMD should support
* PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
- * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
+ * if the RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
*/
#define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
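A small sketch of using the mask on a received mbuf m; the drop policy is purely illustrative:

uint64_t outer = m->ol_flags & PKT_RX_OUTER_L4_CKSUM_MASK;
if (outer == PKT_RX_OUTER_L4_CKSUM_BAD)
	rte_pktmbuf_free(m);  /* example policy: drop packets with a bad outer checksum */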
* a) Fill outer_l2_len and outer_l3_len in mbuf.
* b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
* c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
- * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
+ * 2) Configure RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
*/
#define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
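A hedged sketch of the steps above for an mbuf m carrying an IPv4/UDP tunnel header; the header lengths are the common sizes and should be adjusted for VLAN tags or IP options:

m->outer_l2_len = sizeof(struct rte_ether_hdr);
m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
m->ol_flags |= PKT_TX_OUTER_UDP_CKSUM | PKT_TX_OUTER_IPV4;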
* It can be used for tunnels which are not standards or listed above.
* It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
* or PKT_TX_TUNNEL_IPIP if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_IP_TNL_TSO.
* Outer and inner checksums are done according to the existing flags like
* PKT_TX_xxx_CKSUM.
* Specific tunnel headers that contain payload length, sequence id
* It can be used for tunnels which are not standards or listed above.
* It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
* if possible.
- * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * The ethdev must be configured with RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO.
* Outer and inner checksums are done according to the existing flags like
* PKT_TX_xxx_CKSUM.
* Specific tunnel headers that contain payload length, sequence id
* of the dynamic field to be registered:
* const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... };
* - The application initializes the PMD, and asks for this feature
- * at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in
+ * at port initialization by passing RTE_ETH_RX_OFFLOAD_MY_FEATURE in
* rxconf. This will make the PMD to register the field by calling
* rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD
* stores the returned offset.