struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.rx_adv_conf = {
.rss_conf = {
uint64_t rx_offloads = port->dev_conf.rxmode.offloads;
if (!strcmp(res->name, "max-pkt-len")) {
- if (res->value < ETHER_MIN_LEN) {
+ if (res->value < RTE_ETHER_MIN_LEN) {
printf("max-pkt-len can not be less than %d\n",
- ETHER_MIN_LEN);
+ RTE_ETHER_MIN_LEN);
return;
}
if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
return;
port->dev_conf.rxmode.max_rx_pkt_len = res->value;
- if (res->value > ETHER_MAX_LEN)
+ if (res->value > RTE_ETHER_MAX_LEN)
rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
{
struct cmd_config_mtu_result *res = parsed_result;
- if (res->value < ETHER_MIN_LEN) {
- printf("mtu cannot be less than %d\n", ETHER_MIN_LEN);
+ if (res->value < RTE_ETHER_MIN_LEN) {
+ printf("mtu cannot be less than %d\n", RTE_ETHER_MIN_LEN);
return;
}
port_mtu_set(res->port_id, res->value);
memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
- rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &res->address, RTE_ETHER_ADDR_LEN);
/* set VF MAC filter */
filter.is_vf = 1;
return;
for (i = 0; i < nb_item; i++) {
- if (vlan_list[i] > ETHER_MAX_VLAN_ID) {
+ if (vlan_list[i] > RTE_ETHER_MAX_VLAN_ID) {
printf("Invalid vlan_id: must be < 4096\n");
return;
}
if (vxlan_encap_conf.select_vlan)
vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_vxlan = {
if (nvgre_encap_conf.select_vlan)
nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_nvgre = {
if (l2_encap_conf.select_vlan)
l2_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(l2_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(l2_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_l2_encap = {
if (mplsogre_encap_conf.select_vlan)
mplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_mplsogre_encap = {
if (mplsoudp_encap_conf.select_vlan)
mplsoudp_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(mplsoudp_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(mplsoudp_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_mplsoudp_encap = {
.item_vxlan.flags = 0,
};
memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
- vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
- vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
+ vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
if (!vxlan_encap_conf.select_ipv4) {
memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
&vxlan_encap_conf.ipv6_src,
.item_nvgre.flow_id = 0,
};
memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
- nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
- nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
if (!nvgre_encap_conf.select_ipv4) {
memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
&nvgre_encap_conf.ipv6_src,
};
header = action_encap_data->data;
if (l2_encap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (l2_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(eth.dst.addr_bytes,
- l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(eth.src.addr_bytes,
- l2_encap_conf.eth_src, ETHER_ADDR_LEN);
+ l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (l2_encap_conf.select_vlan) {
if (l2_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
};
header = action_decap_data->data;
if (l2_decap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (l2_decap_conf.select_vlan) {
};
header = action_encap_data->data;
if (mplsogre_encap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsogre_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(eth.dst.addr_bytes,
- mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(eth.src.addr_bytes,
- mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsogre_encap_conf.select_vlan) {
if (mplsogre_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
};
header = action_decap_data->data;
if (mplsogre_decap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsogre_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(eth.dst.addr_bytes,
- mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(eth.src.addr_bytes,
- mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsogre_encap_conf.select_vlan) {
if (mplsogre_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
};
header = action_encap_data->data;
if (mplsoudp_encap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsoudp_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(eth.dst.addr_bytes,
- mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(eth.src.addr_bytes,
- mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsoudp_encap_conf.select_vlan) {
if (mplsoudp_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
};
header = action_decap_data->data;
if (mplsoudp_decap_conf.select_vlan)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else if (mplsoudp_encap_conf.select_ipv4)
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(eth.dst.addr_bytes,
- mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
memcpy(eth.src.addr_bytes,
- mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
memcpy(header, &eth, sizeof(eth));
header += sizeof(eth);
if (mplsoudp_encap_conf.select_vlan) {
if (mplsoudp_encap_conf.select_ipv4)
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
else
- vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
memcpy(header, &vlan, sizeof(vlan));
header += sizeof(vlan);
}
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
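RTE_ETHER_ADDR_FMT_SIZE is still 18, enough for "XX:XX:XX:XX:XX:XX" plus the NUL terminator. A usage sketch with a hypothetical zeroed address:

struct rte_ether_addr zero = { .addr_bytes = { 0 } };
print_ethaddr("Address: ", &zero); /* "Address: 00:00:00:00:00:00" */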
static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
- if (ethertype == _htons(ETHER_TYPE_IPv4))
+ if (ethertype == _htons(RTE_ETHER_TYPE_IPv4))
return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
+ else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}
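For context, _htons here is testpmd's compile-time byte swap, so the big-endian on-wire ethertype is compared against a swapped constant instead of converting every packet. Roughly, on a little-endian host:

/* sketch of the idiom; the real macro is a no-op on big-endian hosts */
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
/* _htons(RTE_ETHER_TYPE_IPv4) == 0x0008, matching the raw bytes
 * 0x08 0x00 stored in the header when read on a little-endian CPU. */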
info->l2_len = sizeof(struct rte_ether_hdr);
info->ethertype = eth_hdr->ether_type;
- if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
+ if (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN)) {
struct rte_vlan_hdr *vlan_hdr = (
struct rte_vlan_hdr *)(eth_hdr + 1);
}
switch (info->ethertype) {
- case _htons(ETHER_TYPE_IPv4):
+ case _htons(RTE_ETHER_TYPE_IPv4):
ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
parse_ipv4(ipv4_hdr, info);
break;
- case _htons(ETHER_TYPE_IPv6):
+ case _htons(RTE_ETHER_TYPE_IPv6):
ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
parse_ipv6(ipv6_hdr, info);
break;
sizeof(struct rte_vxlan_hdr));
parse_ethernet(eth_hdr, info);
- info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
+ info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
/* Parse a vxlan-gpe header */
sizeof(struct udp_hdr));
if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
- VXLAN_GPE_TYPE_IPV4) {
+ RTE_VXLAN_GPE_TYPE_IPV4) {
info->is_tunnel = 1;
info->outer_ethertype = info->ethertype;
info->outer_l2_len = info->l2_len;
vxlan_gpe_len);
parse_ipv4(ipv4_hdr, info);
- info->ethertype = _htons(ETHER_TYPE_IPv4);
+ info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
info->l2_len = 0;
- } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {
+ } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
info->is_tunnel = 1;
info->outer_ethertype = info->ethertype;
info->outer_l2_len = info->l2_len;
ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
vxlan_gpe_len);
- info->ethertype = _htons(ETHER_TYPE_IPv6);
+ info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
parse_ipv6(ipv6_hdr, info);
info->l2_len = 0;
- } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {
+ } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
info->is_tunnel = 1;
info->outer_ethertype = info->ethertype;
info->outer_l2_len = info->l2_len;
} else
return;
- info->l2_len += ETHER_VXLAN_GPE_HLEN;
+ info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
}
/* Parse a gre header */
if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
gre_len += GRE_EXT_LEN;
- if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
+ if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPv4)) {
info->is_tunnel = 1;
info->outer_ethertype = info->ethertype;
info->outer_l2_len = info->l2_len;
ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);
parse_ipv4(ipv4_hdr, info);
- info->ethertype = _htons(ETHER_TYPE_IPv4);
+ info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
info->l2_len = 0;
- } else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
+ } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPv6)) {
info->is_tunnel = 1;
info->outer_ethertype = info->ethertype;
info->outer_l2_len = info->l2_len;
ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);
- info->ethertype = _htons(ETHER_TYPE_IPv6);
+ info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
parse_ipv6(ipv6_hdr, info);
info->l2_len = 0;
- } else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
+ } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {
info->is_tunnel = 1;
info->outer_ethertype = info->ethertype;
info->outer_l2_len = info->l2_len;
if (ip_version == 4) {
parse_ipv4(ipv4_hdr, info);
- info->ethertype = _htons(ETHER_TYPE_IPv4);
+ info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
} else {
parse_ipv6(ipv6_hdr, info);
- info->ethertype = _htons(ETHER_TYPE_IPv6);
+ info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
}
info->l2_len = 0;
}
tso_segsz = info->tunnel_tso_segsz;
}
- if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
+ if (info->ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum =
rte_ipv4_cksum(ipv4_hdr);
}
- } else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
+ } else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPv6))
ol_flags |= PKT_TX_IPV6;
else
return 0; /* packet type not supported, nothing to do */
struct udp_hdr *udp_hdr;
uint64_t ol_flags = 0;
- if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
+ if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_OUTER_IPV4;
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
- if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
+ if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4))
udp_hdr->dgram_cksum =
rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
else
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
rte_ether_addr_copy(&cfg_ether_dst, ð_hdr->d_addr);
rte_ether_addr_copy(&cfg_ether_src, ð_hdr->s_addr);
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
/* Initialize IP header. */
ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
static void
ether_addr_dump(const char *what, const struct rte_ether_addr *ea)
{
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, ea);
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, ea);
if (what)
printf("%s", what);
printf("%s", buf);
ether_addr_dump(" ETH: src=", ð_h->s_addr);
ether_addr_dump(" dst=", ð_h->d_addr);
}
- if (eth_type == ETHER_TYPE_VLAN) {
+ if (eth_type == RTE_ETHER_TYPE_VLAN) {
vlan_h = (struct rte_vlan_hdr *)
((char *)eth_h + sizeof(struct rte_ether_hdr));
l2_len += sizeof(struct rte_vlan_hdr);
}
/* Reply to ARP requests */
- if (eth_type == ETHER_TYPE_ARP) {
+ if (eth_type == RTE_ETHER_TYPE_ARP) {
arp_h = (struct rte_arp_hdr *) ((char *)eth_h + l2_len);
arp_op = RTE_BE_TO_CPU_16(arp_h->arp_opcode);
arp_pro = RTE_BE_TO_CPU_16(arp_h->arp_protocol);
}
if ((RTE_BE_TO_CPU_16(arp_h->arp_hardware) !=
RTE_ARP_HRD_ETHER) ||
- (arp_pro != ETHER_TYPE_IPv4) ||
+ (arp_pro != RTE_ETHER_TYPE_IPv4) ||
(arp_h->arp_hlen != 6) ||
(arp_h->arp_plen != 4)
) {
continue;
}
- if (eth_type != ETHER_TYPE_IPv4) {
+ if (eth_type != RTE_ETHER_TYPE_IPv4) {
rte_pktmbuf_free(pkt);
continue;
}
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
- if (eth_type == ETHER_TYPE_1588) {
+ if (eth_type == RTE_ETHER_TYPE_1588) {
printf("Port %u Received PTP packet not filtered"
" by hardware\n",
fs->rx_port);
rte_pktmbuf_free(mb);
return;
}
- if (eth_type != ETHER_TYPE_1588) {
+ if (eth_type != RTE_ETHER_TYPE_1588) {
printf("Port %u Received NON PTP packet incorrectly"
" detected by hardware\n",
fs->rx_port);
}
if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
n = atoi(optarg);
- if (n >= ETHER_MIN_LEN) {
+ if (n >= RTE_ETHER_MIN_LEN) {
rx_mode.max_rx_pkt_len = (uint32_t) n;
- if (n > ETHER_MAX_LEN)
+ if (n > RTE_ETHER_MAX_LEN)
rx_offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
} else
rte_exit(EXIT_FAILURE,
"Invalid max-pkt-len=%d - should be > %d\n",
- n, ETHER_MIN_LEN);
+ n, RTE_ETHER_MIN_LEN);
}
if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) {
if (!strcmp(optarg, "signature"))
* Ethernet device configuration.
*/
struct rte_eth_rxmode rx_mode = {
- .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ /**< Default maximum frame length. */
};
struct rte_eth_txmode tx_mode = {
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
-uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
+uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
/*
* Helper function to check if socket is already discovered.
portid_t i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
+ peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
peer_eth_addrs[i].addr_bytes[5] = i;
}
}
fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
- fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
- ETHER_CRC_LEN;
+ fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
+ RTE_ETHER_CRC_LEN;
fwd_lcores[lc_id]->gso_ctx.flag = 0;
}
rte_be16_t vlan_tci;
uint8_t ip_tos;
uint8_t ip_ttl;
- uint8_t eth_src[ETHER_ADDR_LEN];
- uint8_t eth_dst[ETHER_ADDR_LEN];
+ uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+ uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
};
struct vxlan_encap_conf vxlan_encap_conf;
uint8_t ipv6_src[16];
uint8_t ipv6_dst[16];
rte_be16_t vlan_tci;
- uint8_t eth_src[ETHER_ADDR_LEN];
- uint8_t eth_dst[ETHER_ADDR_LEN];
+ uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+ uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
};
struct nvgre_encap_conf nvgre_encap_conf;
uint32_t select_ipv4:1;
uint32_t select_vlan:1;
rte_be16_t vlan_tci;
- uint8_t eth_src[ETHER_ADDR_LEN];
- uint8_t eth_dst[ETHER_ADDR_LEN];
+ uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+ uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
};
struct l2_encap_conf l2_encap_conf;
uint8_t ipv6_src[16];
uint8_t ipv6_dst[16];
rte_be16_t vlan_tci;
- uint8_t eth_src[ETHER_ADDR_LEN];
- uint8_t eth_dst[ETHER_ADDR_LEN];
+ uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+ uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
};
struct mplsogre_encap_conf mplsogre_encap_conf;
uint8_t ipv6_src[16];
uint8_t ipv6_dst[16];
rte_be16_t vlan_tci;
- uint8_t eth_src[ETHER_ADDR_LEN];
- uint8_t eth_dst[ETHER_ADDR_LEN];
+ uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+ uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
};
struct mplsoudp_encap_conf mplsoudp_encap_conf;
*/
rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], ð_hdr.d_addr);
rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, ð_hdr.s_addr);
- eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
nb_pkt_per_burst) == 0) {
static inline void
print_ether_addr(const char *what, struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", what, buf);
}
struct rte_vlan_hdr *vhdr = (struct rte_vlan_hdr *)(
(uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr));
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
vhdr->vlan_tci = van_id;
uint32_t opcode)
{
arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
- arp_hdr->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- arp_hdr->arp_hlen = ETHER_ADDR_LEN;
+ arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
+ arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
arp_hdr->arp_plen = sizeof(uint32_t);
arp_hdr->arp_opcode = rte_cpu_to_be_16(opcode);
rte_ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
pkt->l2_len = eth_hdr_size;
if (ipv4) {
- pkt->vlan_tci = ETHER_TYPE_IPv4;
+ pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
pkt->l3_len = sizeof(struct ipv4_hdr);
} else {
- pkt->vlan_tci = ETHER_TYPE_IPv6;
+ pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
pkt->l3_len = sizeof(struct ipv6_hdr);
}
pkt->l2_len = eth_hdr_size;
if (ipv4) {
- pkt->vlan_tci = ETHER_TYPE_IPv4;
+ pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
pkt->l3_len = sizeof(struct ipv4_hdr);
} else {
- pkt->vlan_tci = ETHER_TYPE_IPv6;
+ pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
pkt->l3_len = sizeof(struct ipv6_hdr);
}
is_addr_different(const struct rte_ether_addr addr, uint64_t num)
{
int i;
- for (i = 0; i < ETHER_ADDR_LEN; i++, num >>= 8)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++, num >>= 8)
if (addr.addr_bytes[i] != (num & 0xFF)) {
return 1;
}
printf("Set up IPv4 UDP traffic\n");
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
printf("ETH pktlen %u\n", pktlen);
printf("Set up IPv4 TCP traffic\n");
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
printf("ETH pktlen %u\n", pktlen);
printf("Set up IPv4 SCTP traffic\n");
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
printf("ETH pktlen %u\n", pktlen);
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.split_hdr_size = 0,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
for (i = 0; i < TEST_MAX_NUMBER_OF_PORTS; i++) {
char pmd_name[RTE_ETH_NAME_MAX_LEN];
- mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;
+ mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;
snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_%d", i);
mac_addr = (struct rte_ether_addr *)slave_mac;
- mac_addr->addr_bytes[ETHER_ADDR_LEN-1] =
+ mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] =
test_params->bonded_slave_count-1;
rte_eth_macaddr_get(
test_params->bonded_port_id);
expected_mac_addr = (struct rte_ether_addr *)&slave_mac;
- expected_mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;
+ expected_mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;
/* Check primary slave MAC */
rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr);
for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) {
char pmd_name[RTE_ETH_NAME_MAX_LEN];
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = i + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+ i + 100;
snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN,
"eth_slave_%d", i);
/*
* 3. Set explicit MAC address on bonded ethdev
*/
- bonded_mac_addr.addr_bytes[ETHER_ADDR_LEN-2] = 0xFF;
- bonded_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0xAA;
+ bonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-2] = 0xFF;
+ bonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0xAA;
TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set(
bonded_port_id, &bonded_mac_addr),
sizeof(read_mac_addr)),
"slave port 0 mac address not as expected");
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;
rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
"slave port 1 mac address not as expected");
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 2 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100;
rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
sizeof(read_mac_addr)),
"bonded port mac address not as expected");
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100;
rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
"slave port 0 mac address not as expected");
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;
rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
"Number of slaves (%d) is great than expected (%d).",
slave_count, 0);
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100;
rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
"slave port 0 mac address not as expected");
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;
rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
"slave port 1 mac address not as expected");
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 2 + 100;
+ slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100;
rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
sizeof(read_mac_addr)),
void *ip_hdr;
if (ipv4)
- ether_type = ETHER_TYPE_IPv4;
+ ether_type = RTE_ETHER_TYPE_IPv4;
else
- ether_type = ETHER_TYPE_IPv6;
+ ether_type = RTE_ETHER_TYPE_IPv6;
if (toggle_dst_mac)
initialize_eth_header(test_params->pkt_eth_hdr,
for (i = 0; i < TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT; i++) {
/* Generate slave name / MAC address */
snprintf(slave_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_poll_%d", i);
- mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;
+ mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;
/* Create slave devices with no ISR Support */
if (polling_test_slaves[i] == -1) {
initialize_eth_header(test_params->pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac_0,
- ETHER_TYPE_IPv4, 0, 0);
+ RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
dst_port_0, 16);
pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr,
initialize_eth_header(test_params->pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac_0,
- ETHER_TYPE_IPv4, 0, 0);
+ RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
dst_port_0, 16);
pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr,
initialize_eth_header(test_params->pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac_1,
- ETHER_TYPE_IPv4, 0, 0);
+ RTE_ETHER_TYPE_IPv4, 0, 0);
/* Generate a burst 2 of packets to transmit */
TEST_ASSERT_EQUAL(generate_packet_burst(test_params->mbuf_pool, &pkts_burst[1][0],
initialize_eth_header(test_params->pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac_0,
- ETHER_TYPE_IPv4, 0, 0);
+ RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
dst_port_0, 16);
initialize_eth_header(test_params->pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac_0,
- ETHER_TYPE_IPv4, 0, 0);
+ RTE_ETHER_TYPE_IPv4, 0, 0);
} else {
initialize_eth_header(test_params->pkt_eth_hdr,
(struct rte_ether_addr *)test_params->default_slave_mac,
(struct rte_ether_addr *)dst_mac_0,
- ETHER_TYPE_IPv4, 0, 0);
+ RTE_ETHER_TYPE_IPv4, 0, 0);
}
pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
dst_port_0, 16);
* them through the bonding port.
*/
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client1,
rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client2,
rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client3,
rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client4,
* them in the rx queue to be received by the bonding driver on rx_burst.
*/
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host,
1);
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client2, ip_host,
1);
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client3, ip_host,
1);
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_ARP, 0, 0);
arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
sizeof(struct rte_ether_hdr));
initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client4, ip_host,
* Generating packet with double VLAN header and placing it in the rx queue.
*/
pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
- memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
+ memcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);
eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_VLAN, 0,
- 0);
+ initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+ RTE_ETHER_TYPE_VLAN, 0, 0);
vlan_pkt = (struct rte_vlan_hdr *)((char *)(eth_pkt + 1));
vlan_pkt->vlan_tci = rte_cpu_to_be_16(1);
- vlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ vlan_pkt->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
vlan_pkt = vlan_pkt+1;
vlan_pkt->vlan_tci = rte_cpu_to_be_16(2);
- vlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_ARP);
+ vlan_pkt->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);
arp_pkt = (struct rte_arp_hdr *)((char *)(vlan_pkt + 1));
initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host,
RTE_ARP_OP_REPLY);
retval = -1;
goto test_end;
}
- if (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+ if (vlan_pkt->eth_proto != rte_cpu_to_be_16(
+ RTE_ETHER_TYPE_VLAN)) {
retval = -1;
goto test_end;
}
retval = -1;
goto test_end;
}
- if (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (vlan_pkt->eth_proto != rte_cpu_to_be_16(
+ RTE_ETHER_TYPE_ARP)) {
retval = -1;
goto test_end;
}
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
RTE_VERIFY(slave->port_id != INVALID_PORT_ID);
rte_ether_addr_copy(&slave_mac_default, &addr);
- addr.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+ addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;
rte_eth_dev_mac_addr_remove(slave->port_id, &addr);
RTE_VERIFY(lacp_pkt != NULL);
hdr = rte_pktmbuf_mtod(lacp_pkt, struct rte_ether_hdr *);
- RTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_SLOW));
+ RTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW));
slow_hdr = rte_pktmbuf_mtod(lacp_pkt, struct slow_protocol_frame *);
RTE_VERIFY(slow_hdr->slow_protocol.subtype == SLOW_SUBTYPE_LACP);
/* look for LACP */
hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- if (hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_SLOW))
+ if (hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW))
return 1;
slow_hdr = rte_pktmbuf_mtod(pkt, struct slow_protocol_frame *);
/* Change source address to partner address */
rte_ether_addr_copy(&parnter_mac_default, &slow_hdr->eth_hdr.s_addr);
- slow_hdr->eth_hdr.s_addr.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+ slow_hdr->eth_hdr.s_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+ slave->port_id;
lacp = (struct lacpdu *) &slow_hdr->slow_protocol;
/* Save last received state */
FOR_EACH_SLAVE(i, slave) {
void *pkt = NULL;
- dst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+ dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;
retval = generate_and_put_packets(slave, &src_mac, &dst_mac, 1);
TEST_ASSERT_SUCCESS(retval, "Failed to generate test packet burst.");
- src_mac.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+ src_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;
retval = generate_and_put_packets(slave, &src_mac, &bonded_mac, 1);
TEST_ASSERT_SUCCESS(retval, "Failed to generate test packet burst.");
/* Prepare burst */
for (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) {
- dst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = pkts_cnt;
+ dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt;
retval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]);
if (retval != 1)
/* Prepare burst. */
for (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) {
- dst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = pkts_cnt;
+ dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt;
retval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]);
if (retval != 1)
/* Init source address */
rte_ether_addr_copy(&parnter_mac_default, &marker_hdr->eth_hdr.s_addr);
- marker_hdr->eth_hdr.s_addr.addr_bytes[ETHER_ADDR_LEN-1] = slave->port_id;
+ marker_hdr->eth_hdr.s_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+ slave->port_id;
- marker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
+ marker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
marker_hdr->marker.subtype = SLOW_SUBTYPE_MARKER;
marker_hdr->marker.version_number = 1;
int retval;
uint16_t nb_pkts;
uint8_t i, j;
- const uint16_t ethtype_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ const uint16_t ethtype_slow_be = rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
retval = initialize_bonded_device_with_slaves(TEST_MARKER_SLAVE_COUT,
0);
rte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);
initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,
- ETHER_TYPE_SLOW, 0, 0);
+ RTE_ETHER_TYPE_SLOW, 0, 0);
for (i = 0; i < SLAVE_COUNT; i++) {
lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);
rte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);
initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,
- ETHER_TYPE_SLOW, 0, 0);
+ RTE_ETHER_TYPE_SLOW, 0, 0);
for (i = 0; i < SLAVE_COUNT; i++) {
lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf rss_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = initialize_ipv4_header(&pkt_ipv4_hdr,
IPV4_ADDR(10, 0, 0, 1),
vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL)
goto err;
Interface name: kni#
force bind kernel thread to a core : NO
mbuf size: (rte_pktmbuf_data_room_size(pktmbuf_pool) - RTE_PKTMBUF_HEADROOM)
- mtu: (conf.mbuf_size - ETHER_HDR_LEN)
+ mtu: (conf.mbuf_size - RTE_ETHER_HDR_LEN)
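As a worked example, assuming a hypothetical 2048-byte data room remaining after RTE_PKTMBUF_HEADROOM:

uint16_t mbuf_size = 2048;                    /* assumed room per mbuf */
uint16_t mtu = mbuf_size - RTE_ETHER_HDR_LEN; /* 2048 - 14 = 2034 */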
KNI control path is not supported with the PMD, since there is no physical
backend device by default.
Matches an 802.1Q/ad VLAN tag.
The corresponding standard outer EtherType (TPID) values are
-``ETHER_TYPE_VLAN`` or ``ETHER_TYPE_QINQ``. It can be overridden by the
+``RTE_ETHER_TYPE_VLAN`` or ``RTE_ETHER_TYPE_QINQ``. It can be overridden by the
preceding pattern item.
- ``tci``: tag control information.
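A minimal sketch of a pattern built from this item (identifiers from rte_flow.h; VLAN ID 42 is arbitrary, and the TPID is supplied by the preceding ``eth`` item):

struct rte_flow_item_eth eth_spec = {
	.type = RTE_BE16(RTE_ETHER_TYPE_VLAN), /* outer TPID */
};
struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(42), /* default mask covers the VID bits only */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};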
Matches an IEEE 802.1BR E-Tag header.
The corresponding standard outer EtherType (TPID) value is
-``ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.
+``RTE_ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.
- ``epcp_edei_in_ecid_b``: E-Tag control information (E-TCI), E-PCP (3b),
E-DEI (1b), ingress E-CID base (12b).
.. code-block:: c
static const struct rte_eth_conf port_conf_default = {
- .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+ .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
};
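A minimal sketch of applying this default, assuming port 0 is valid and queue setup follows:

int ret = rte_eth_dev_configure(0 /* port_id */, 1 /* nb_rxq */,
		1 /* nb_txq */, &port_conf_default);
if (ret != 0)
	rte_exit(EXIT_FAILURE, "Cannot configure port\n");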
For this example the ports are set up with 1 RX and 1 TX queue using the
rte_ether_addr_copy(dest_addr, &ethdr->d_addr);
rte_ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
- ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
+ ethdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);
/* Put new packet into the output queue */
.. code-block:: c
static const struct rte_eth_conf port_conf_default = {
- .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+ .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
};
For this example the ports are set up with 1 RX and 1 TX queue using the
typedef uint32_t gfp_t;
typedef uint32_t irqreturn_t;
+#define ETHER_ADDR_LEN 6
+
#define IRQ_HANDLED 0
#define request_irq qbman_request_irq
#define free_irq qbman_free_irq
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;
dev_info->default_rxportconf.nb_queues = 1;
if (ioctl(sock, SIOCGIFHWADDR, &ifr))
goto error;
- rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
close(sock);
return 0;
dev->dev_ops = &ark_eth_dev_ops;
- dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
+ dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
if (!dev->data->mac_addrs) {
PMD_DRV_LOG(ERR,
"Failed to allocated memory for storing mac address"
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc(name,
+ RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
PMD_DRV_LOG(ERR,
"Memory allocation for MAC failed!"
atl_disable_intr(hw);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "MAC Malloc failed");
return -ENOMEM;
ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
- memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);
+ memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
+ RTE_ETHER_ADDR_LEN);
return 0;
}
ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
- memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
+ memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
+ RTE_ETHER_ADDR_LEN);
cfg->aq_macsec.rxsc.pi = pi;
return 0;
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct rte_eth_dev_info dev_info;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
atl_dev_info_get(dev, &dev_info);
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
/* update max frame size */
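The bounds are numerically unchanged by the rename: RTE_ETHER_MIN_MTU is still 68 (the RFC 791 minimum) and a standard 1500-byte MTU still yields the classic maximum frame, as in this illustration:

uint32_t frame_size = 1500 + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
/* == 1500 + 14 + 4 == 1518 == RTE_ETHER_MAX_LEN */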
#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
-#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN
/*
avp->host_features = host_info->features;
rte_spinlock_init(&avp->lock);
memcpy(&avp->ethaddr.addr_bytes[0],
- host_info->ethaddr, ETHER_ADDR_LEN);
+ host_info->ethaddr, RTE_ETHER_ADDR_LEN);
/* adjust max values to not exceed our max */
avp->max_tx_queues =
RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
/* Ethernet info */
char ethaddr[ETH_ALEN];
#else
- char ethaddr[ETHER_ADDR_LEN];
+ char ethaddr[RTE_ETHER_ADDR_LEN];
#endif
uint8_t mode; /**< device mode, i.e guest, host, trace */
static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
- return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_HLEN;
+ return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_HLEN;
}
/* query busy bit */
pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
- ETHER_ADDR_LEN, 0);
+ RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
PMD_INIT_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
-#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_RX_MIN_BUF_SIZE (RTE_ETHER_MAX_LEN + VLAN_HLEN)
#define AXGBE_MAX_MAC_ADDRS 1
#define AXGBE_RX_BUF_ALIGN 64
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
bnx2x_get_phy_info(sc);
} else {
/* Left mac of VF unfilled, PF should set it for VF */
- memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
+ memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN);
}
sc->wol = 0;
/* set the default MTU (changed via ifconfig) */
- sc->mtu = ETHER_MTU;
+ sc->mtu = RTE_ETHER_MTU;
bnx2x_set_modes_bitmap(sc);
typedef volatile int ecore_atomic_t;
-#define ETH_ALEN ETHER_ADDR_LEN /* 6 */
+#define ETH_ALEN RTE_ETHER_ADDR_LEN /* 6 */
#define ECORE_SWCID_SHIFT 17
#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1)
struct bnxt_irq *irq_tbl;
#define MAX_NUM_MAC_ADDR 32
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
uint16_t hwrm_cmd_seq;
uint16_t kong_cmd_seq;
uint8_t tx_cosq_id;
uint16_t fw_fid;
- uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
+ uint8_t dflt_mac_addr[RTE_ETHER_ADDR_LEN];
uint16_t max_rsscos_ctx;
uint16_t max_cp_rings;
uint16_t max_tx_rings;
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
- if (bp->eth_dev->data->mtu > ETHER_MTU) {
+ if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags |= BNXT_FLAG_JUMBO;
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
- dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
- + VLAN_TAG_SIZE * 2;
+ dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
- BNXT_NUM_VLANS;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
}
return 0;
bnxt_filter_info, next);
bnxt_hwrm_clear_l2_filter(bp, filter);
filter->mac_index = INVALID_MAC_INDEX;
- memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
+ memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
STAILQ_INSERT_TAIL(&bp->free_filter_list,
filter, next);
}
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
- memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
+ memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}
new_filter->mac_index =
filter->mac_index;
memcpy(new_filter->l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
/* MAC only filter */
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
/* Inherit MAC from the previous filter */
new_filter->mac_index = filter->mac_index;
memcpy(new_filter->l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
new_filter->l2_ivlan = vlan_id;
new_filter->l2_ivlan_mask = 0xF000;
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
return rc;
- memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
- memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
/* TODO Check for Duplicate mcast addresses */
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
for (i = 0; i < nb_mc_addr; i++) {
- memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
- off += ETHER_ADDR_LEN;
+ memcpy(vnic->mc_list + off, &mc_addr_list[i],
+ RTE_ETHER_ADDR_LEN);
+ off += RTE_ETHER_ADDR_LEN;
}
vnic->mc_addr_cnt = i;
bnxt_dev_info_get_op(eth_dev, &dev_info);
- if (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
+ if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
- ETHER_MIN_MTU, BNXT_MAX_MTU);
+ RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
return -EINVAL;
}
- if (new_mtu > ETHER_MTU) {
+ if (new_mtu > RTE_ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
}
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
- new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+ VLAN_TAG_SIZE * 2;
eth_dev->data->mtu = new_mtu;
PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint16_t size = 0;
- vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc)
break;
int match = 0;
*ret = 0;
- if (efilter->ether_type == ETHER_TYPE_IPv4 ||
- efilter->ether_type == ETHER_TYPE_IPv6) {
+ if (efilter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ efilter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
if ((!memcmp(efilter->mac_addr.addr_bytes,
- mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
mfilter->flags ==
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
mfilter->ethertype == efilter->ether_type)) {
} else {
STAILQ_FOREACH(mfilter, &vnic->filter, next)
if ((!memcmp(efilter->mac_addr.addr_bytes,
- mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
mfilter->ethertype == efilter->ether_type &&
mfilter->flags ==
HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
}
bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
bfilter->ethertype = efilter->ether_type;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
} else {
filter->dst_id = vnic->fw_vnic_id;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
if (filter->dst_macaddr[i] == 0x00)
filter1 = STAILQ_FIRST(&vnic0->filter);
else
mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
mf->l2_ivlan == nf->l2_ivlan &&
mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
- !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr, nf->l2_addr,
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_macaddr, nf->src_macaddr,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->dst_macaddr, nf->dst_macaddr,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_ipaddr, nf->src_ipaddr,
sizeof(nf->src_ipaddr)) &&
!memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
- ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
+ RTE_ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
- ETHER_ADDR_LEN * bp->max_l2_ctx);
+ RTE_ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
goto error_free;
}
- if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
PMD_DRV_LOG(ERR,
"Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
}
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
- memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+ memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
- ETHER_ADDR_LEN);
- memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
return filter;
}
/* Filter Characteristics */
uint32_t flags;
uint32_t enables;
- uint8_t l2_addr[ETHER_ADDR_LEN];
- uint8_t l2_addr_mask[ETHER_ADDR_LEN];
+ uint8_t l2_addr[RTE_ETHER_ADDR_LEN];
+ uint8_t l2_addr_mask[RTE_ETHER_ADDR_LEN];
uint16_t l2_ovlan;
uint16_t l2_ovlan_mask;
uint16_t l2_ivlan;
uint16_t l2_ivlan_mask;
- uint8_t t_l2_addr[ETHER_ADDR_LEN];
- uint8_t t_l2_addr_mask[ETHER_ADDR_LEN];
+ uint8_t t_l2_addr[RTE_ETHER_ADDR_LEN];
+ uint8_t t_l2_addr_mask[RTE_ETHER_ADDR_LEN];
uint16_t t_l2_ovlan;
uint16_t t_l2_ovlan_mask;
uint16_t t_l2_ivlan;
f0 = STAILQ_FIRST(&vnic0->filter);
/* This flow has same DST MAC as the port/l2 filter. */
- if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
+ if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
return f0;
/* This flow needs DST MAC which is not same as port/l2 */
filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
- memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
- memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
+ memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
filter1);
if (rc) {
mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
mf->l2_ivlan == nf->l2_ivlan &&
mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
- !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr, nf->l2_addr,
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_macaddr, nf->src_macaddr,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->dst_macaddr, nf->dst_macaddr,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_ipaddr, nf->src_ipaddr,
sizeof(nf->src_ipaddr)) &&
!memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
memcpy(req.l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
memcpy(req.l2_addr_mask, filter->l2_addr_mask,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
req.l2_ovlan = filter->l2_ovlan;
}
bp->fw_fid = rte_le_to_cpu_32(resp->fid);
- memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+ memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
if (vnic->func_default)
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
- req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
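For reference, the MRU set in these hunks is plain frame-size arithmetic over the renamed constants: the MTU plus the 14-byte Ethernet header, the 4-byte CRC, and one 4-byte VLAN tag (or one per BNXT_NUM_VLANS in the func_cfg hunk). A minimal sketch, assuming the driver-local VLAN_TAG_SIZE of 4 and not an actual bnxt or DPDK API:

#include <rte_ether.h>

/* Hypothetical helper mirroring the computation above. */
static inline uint32_t mru_from_mtu(uint32_t mtu)
{
	/* e.g. 1500 + 14 + 4 + 4 = 1522 == RTE_ETHER_MAX_VLAN_FRAME_LEN */
	return mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
	       4 /* VLAN_TAG_SIZE, per tag */;
}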
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
- req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
- req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
rte_eth_random_addr(cfg_req->dflt_mac_addr);
bp->pf.vf_info[vf].random_mac = true;
} else {
- memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
}
}
HWRM_CHECK_RESULT();
- memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+ memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
HWRM_UNLOCK();
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
memcpy(req.dst_macaddr, filter->dst_macaddr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
req.ovlan_vid = filter->l2_ovlan;
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
//if (enables &
//HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
//memcpy(req.dst_macaddr, filter->dst_macaddr,
- //ETHER_ADDR_LEN);
+ //RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
req.ethertype = rte_cpu_to_be_16(filter->ethertype);
bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
- rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
- ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bp->eth_dev->data->rx_queue_state[queue_index] ==
RTE_ETH_QUEUE_STATE_STARTED) {
bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
- rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
- ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
- (2 * VLAN_TAG_SIZE);
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
rxr = rte_zmalloc_socket("bnxt_rx_ring",
uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
HW_HASH_KEY_SIZE +
- BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
+ BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN);
uint16_t max_vnics;
int i;
rte_iova_t mz_phys_addr;
filter->enables ==
(HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
- memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
+ memcmp(addr, filter->l2_addr, RTE_ETHER_ADDR_LEN) == 0) {
bnxt_hwrm_clear_l2_filter(bp, filter);
break;
}
filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
- memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
- memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ memcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
/* Do not add a filter for the default MAC */
if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
- memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
+ memcmp(filter->l2_addr, dflt_mac.addr_bytes, RTE_ETHER_ADDR_LEN))
rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter);
exit:
/* Source and destination MAC */
rte_ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
- hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
+ hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
lacpdu = &hdr->lacpdu;
memset(lacpdu, 0, sizeof(*lacpdu));
rte_ether_addr_copy(&client_info->app_mac, &eth_h->s_addr);
rte_ether_addr_copy(&client_info->cli_mac, &eth_h->d_addr);
if (client_info->vlan_count > 0)
- eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ eth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else
- eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
+ eth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);
arp_h = (struct rte_arp_hdr *)(
(char *)eth_h + sizeof(struct rte_ether_hdr)
arp_h->arp_data.arp_tip = client_info->cli_ip;
arp_h->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
- arp_h->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- arp_h->arp_hlen = ETHER_ADDR_LEN;
+ arp_h->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
+ arp_h->arp_hlen = RTE_ETHER_ADDR_LEN;
arp_h->arp_plen = sizeof(uint32_t);
arp_h->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);
{
size_t vlan_offset = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
- rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
struct rte_vlan_hdr *vlan_hdr =
(struct rte_vlan_hdr *)(eth_hdr + 1);
vlan_offset = sizeof(struct rte_vlan_hdr);
*proto = vlan_hdr->eth_proto;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
vlan_hdr = vlan_hdr + 1;
*proto = vlan_hdr->eth_proto;
vlan_offset += sizeof(struct rte_vlan_hdr);
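The hunk above is the bonding driver's VLAN walk: it skips at most two tags (a single 802.1Q tag, or the outer and inner tags of a QinQ frame) and reports the encapsulated ethertype. A self-contained sketch of the same walk, for orientation only; the caller pre-loads *proto with eth_hdr->ether_type as at the call sites shown later:

#include <rte_byteorder.h>
#include <rte_ether.h>

/* Illustrative restatement: byte offset from the end of the Ethernet
 * header to the first non-VLAN protocol; *proto is updated in place. */
static size_t vlan_offset_sketch(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t off = 0;
	struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *)(eth_hdr + 1);

	if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
	    *proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
		off = sizeof(*vh);		/* outer tag */
		*proto = vh->eth_proto;
		if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
			vh++;			/* inner tag of a QinQ frame */
			*proto = vh->eth_proto;
			off += sizeof(*vh);
		}
	}
	return off;
}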
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
- const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ const uint16_t ether_type_slow_be =
+ rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
(ethertype == ether_type_slow_be &&
static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
.dst.addr_bytes = { 0 },
.src.addr_bytes = { 0 },
- .type = RTE_BE16(ETHER_TYPE_SLOW),
+ .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};
static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
struct rte_ether_hdr *hdr;
- const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ const uint16_t ether_type_slow_be =
+ rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slave_count, idx;
strlcpy(buf, info, 16);
#endif
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
update_client_stats(ipv4_h->src_addr, port, burstnumber);
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
ether_type = eth_h->ether_type;
offset = get_vlan_offset(eth_h, &ether_type);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
bond_mode_alb_arp_recv(eth_h, offset, internals);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
}
vlan_offset = get_vlan_offset(eth_hdr, &proto);
- if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv4_hash(ipv4_hdr);
- } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
l3hash = 0;
l4hash = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
size_t ip_hdr_offset;
l4hash = HASH_L4_PORTS(udp_hdr);
}
}
- } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
ether_type = eth_h->ether_type;
offset = get_vlan_offset(eth_h, &ether_type);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
/* Change src mac in eth header */
dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
internals->candidate_max_rx_pktlen :
- ETHER_MAX_JUMBO_FRAME_LEN;
+ RTE_ETHER_MAX_JUMBO_FRAME_LEN;
/* Max number of tx/rx queues that the bonded device can support is the
* minimum values of the bonded slaves, as all slaves must be capable
eth_dev->data->nb_tx_queues = (uint16_t)1;
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
BOND_MAX_MAC_ADDRS, 0, socket_id);
if (eth_dev->data->mac_addrs == NULL) {
RTE_BOND_LOG(ERR,
"Failed to allocate %u bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
+ RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
goto err;
}
}
vlan_filter_bmp_size =
- rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+ rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
RTE_CACHE_LINE_SIZE);
if (internals->vlan_filter_bmpmem == NULL) {
goto err;
}
- internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+ internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
if (internals->vlan_filter_bmp == NULL) {
RTE_BOND_LOG(ERR,
#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */
#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */
-#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
-#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+#define CXGBE_MIN_RX_BUFSIZE RTE_ETHER_MIN_MTU /* min buf size */
+#define CXGBE_MAX_RX_PKTLEN (9000 + RTE_ETHER_HDR_LEN + \
+ RTE_ETHER_CRC_LEN) /* max pkt */
/* Max poll time is 100 * 100msec = 10 sec */
#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */
#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a)))
#define VLAN_HLEN 4
+#define ETHER_ADDR_LEN 6
#define rmb() rte_rmb() /* dpdk rte provided rmb */
#define wmb() rte_wmb() /* dpdk rte provided wmb */
struct adapter *adapter = pi->adapter;
struct rte_eth_dev_info dev_info;
int err;
- uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
cxgbe_dev_info_get(eth_dev, &dev_info);
- /* Must accommodate at least ETHER_MIN_MTU */
- if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
+ /* Must accommodate at least RTE_ETHER_MIN_MTU */
+ if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
return -EINVAL;
/* set to jumbo mode if needed */
- if (new_mtu > ETHER_MAX_LEN)
+ if (new_mtu > RTE_ETHER_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
cxgbe_dev_info_get(eth_dev, &dev_info);
- /* Must accommodate at least ETHER_MIN_MTU */
+ /* Must accommodate at least RTE_ETHER_MIN_MTU */
if ((pkt_len < dev_info.min_rx_bufsize) ||
(pkt_len > dev_info.max_rx_pktlen)) {
dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
rxq->fl.size = temp_nb_desc;
/* Set to jumbo mode if necessary */
- if (pkt_len > ETHER_MAX_LEN)
+ if (pkt_len > RTE_ETHER_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
uint32_t eport:2; /* egress port to switch packet out */
uint32_t swapmac:1; /* swap SMAC/DMAC for loopback packet */
uint32_t newvlan:2; /* rewrite VLAN Tag */
- uint8_t dmac[ETHER_ADDR_LEN]; /* new destination MAC address */
+ uint8_t dmac[RTE_ETHER_ADDR_LEN]; /* new destination MAC address */
uint16_t vlan; /* VLAN Tag to insert */
/*
item, "ttl/tos are not supported");
fs->type = FILTER_TYPE_IPV4;
- CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+ CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv4, 0xffff, ethtype);
if (!val)
return 0; /* ipv4 wild card */
"tc/flow/hop are not supported");
fs->type = FILTER_TYPE_IPV6;
- CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+ CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv6, 0xffff, ethtype);
if (!val)
return 0; /* ipv6 wild card */
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
pushvlan = (const struct rte_flow_action_of_push_vlan *)
a->conf;
- if (pushvlan->ethertype != ETHER_TYPE_VLAN)
+ if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN)
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"only ethertype 0x8100 "
int ret;
mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- (ETHER_HDR_LEN + ETHER_CRC_LEN);
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
- ETHER_ADDR_LEN, 0);
+ RTE_ETHER_ADDR_LEN, 0);
if (!pi->eth_dev->data->mac_addrs) {
dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
__func__);
rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
- ETHER_ADDR_LEN, 0);
+ RTE_ETHER_ADDR_LEN, 0);
if (!pi->eth_dev->data->mac_addrs) {
dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
__func__);
V_L2T_W_NOREPLY(!sync));
req->l2t_idx = cpu_to_be16(l2t_idx);
req->vlan = cpu_to_be16(e->vlan);
- rte_memcpy(req->dst_mac, e->dmac, ETHER_ADDR_LEN);
+ rte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN);
if (loopback)
- memset(req->dst_mac, 0, ETHER_ADDR_LEN);
+ memset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN);
t4_mgmt_tx(ctrlq, mbuf);
first_free = e;
} else {
if (e->state == L2T_STATE_SWITCHING) {
- if ((!memcmp(e->dmac, dmac, ETHER_ADDR_LEN)) &&
+ if ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) &&
e->vlan == vlan && e->lport == port)
goto exists;
}
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
- rte_memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
+ rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
rte_atomic32_set(&e->refcnt, 1);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
u16 idx; /* entry index within in-memory table */
u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15) */
u8 lport; /* destination port */
- u8 dmac[ETHER_ADDR_LEN]; /* destination MAC address */
+ u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
rte_atomic32_t refcnt; /* entry reference count */
};
static inline bool
match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask)
{
- if (!memcmp(eth_addr, entry->eth_addr, ETHER_ADDR_LEN) &&
- !memcmp(mask, entry->mask, ETHER_ADDR_LEN))
+ if (!memcmp(eth_addr, entry->eth_addr, RTE_ETHER_ADDR_LEN) &&
+ !memcmp(mask, entry->mask, RTE_ETHER_ADDR_LEN))
return true;
return false;
}
/* Fill in the new values */
entry = &mpstcam->entry[ret];
- memcpy(entry->eth_addr, eth_addr, ETHER_ADDR_LEN);
- memcpy(entry->mask, mask, ETHER_ADDR_LEN);
+ memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
+ memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
rte_atomic32_set(&entry->refcnt, 1);
entry->state = MPS_ENTRY_USED;
/* idx can now be different from what user provided */
entry = &mpstcam->entry[idx];
- memcpy(entry->eth_addr, addr, ETHER_ADDR_LEN);
+ memcpy(entry->eth_addr, addr, RTE_ETHER_ADDR_LEN);
/* NOTE: we have considered the case that idx returned by t4_change_mac
* will be different from the user provided value only if user
* provided value is -1
*/
static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
- memset(entry->eth_addr, 0, ETHER_ADDR_LEN);
- memset(entry->mask, 0, ETHER_ADDR_LEN);
+ memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
+ memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
rte_atomic32_clear(&entry->refcnt);
entry->state = MPS_ENTRY_UNUSED;
}
u16 idx;
/* add data here which uniquely defines an entry */
- u8 eth_addr[ETHER_ADDR_LEN];
- u8 mask[ETHER_ADDR_LEN];
+ u8 eth_addr[RTE_ETHER_ADDR_LEN];
+ u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
rte_atomic32_t refcnt;
{
struct sge *s = &adapter->sge;
- return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
s->fl_align);
}
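The free-list buffer size above is the only receive-buffer arithmetic the patch touches in cxgbe: packet shift plus L2 framing plus MTU, rounded up to the free-list alignment. A worked sketch under assumed values (pktshift = 2 and fl_align = 64 are illustrative; the real driver reads both from the SGE configuration):

#include <rte_ether.h>

/* Illustration only, not the driver's function. */
static inline unsigned int fl_buf_size_sketch(unsigned int mtu)
{
	const unsigned int pktshift = 2, fl_align = 64;	/* assumed */
	unsigned int need = pktshift + RTE_ETHER_HDR_LEN +
			    4 /* VLAN_HLEN */ + mtu;
	/* mtu = 1500: 2 + 14 + 4 + 1500 = 1520, rounded up to 1536 */
	return (need + fl_align - 1) & ~(fl_align - 1);
}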
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
- if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+ if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
out_free:
rte_pktmbuf_free(m);
return 0;
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
- if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+ m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
dev_warn(adap, "%s: mapping err for coalesce\n",
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
- eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+ eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
len += sizeof(*lso);
wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
FW_ETH_TX_PKT_WR :
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
- if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
/*
* Refuse mtu that requires the support of scattered packets
return -EINVAL;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
fman_if_set_maxfrm(dpaa_intf->fif, max_len);
dev->data->mtu = max_len
- - ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+ - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
}
if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
- ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+ RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
if (eth_dev->data->mac_addrs == NULL) {
DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+ RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
ret = -ENOMEM;
goto free_tx;
}
fman_if_stats_reset(fman_intf);
/* Disable SG by default */
fman_if_set_sg(fman_intf, 0);
- fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
+ fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
return 0;
struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
mbuf->l3_len);
tcp_hdr->cksum = 0;
- if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
tcp_hdr);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
+ else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
tcp_hdr);
} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
mbuf->l3_len);
udp_hdr->dgram_cksum = 0;
- if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
udp_hdr);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
+ else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
udp_hdr);
}
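Both branches above follow the same pattern: zero the checksum field, then let the rte_ip.h helper fold the pseudo-header and payload. A minimal IPv4/TCP sketch of that pattern, using the pre-rename struct names this tree still carries:

#include <rte_ip.h>
#include <rte_tcp.h>

/* Software TCP checksum for an IPv4 packet; the field must be zeroed
 * before calling the helper, exactly as in the hunks above. */
static void sw_tcp_cksum_v4(struct ipv4_hdr *ip, struct tcp_hdr *tcp)
{
	tcp->cksum = 0;
	tcp->cksum = rte_ipv4_udptcp_cksum(ip, tcp);
}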
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
PMD_INIT_FUNC_TRACE();
}
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
return -EINVAL;
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
* can add MAC entries when rte_eth_dev_mac_addr_add is called.
*/
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
- ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
+ RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
DPAA2_PMD_ERR(
"Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * attr.mac_filter_entries);
+ RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
ret = -ENOMEM;
goto init_err;
}
* The overhead from MTU to max frame size.
* Considering VLAN so a tag needs to be counted.
*/
-#define E1000_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE)
+#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ VLAN_TAG_SIZE)
/*
* Maximum number of Ring Descriptors.
*/
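With a 4-byte VLAN_TAG_SIZE the overhead is 22 bytes, so the classic 1500-byte MTU maps onto the 1522-byte maximum VLAN frame. A compile-time restatement (illustration only):

/* 14 (header) + 4 (CRC) + 4 (one VLAN tag) = 22 bytes of overhead */
_Static_assert(1500 + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4 ==
	       RTE_ETHER_MAX_VLAN_FRAME_LEN, "e1000 overhead arithmetic");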
#define E1000_MAX_VF_MC_ENTRIES 30
struct e1000_vf_info {
- uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
uint16_t num_vf_mc_hashes;
uint16_t default_vf_vlan_id;
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
return -ENOMEM;
}
return -EIO;
}
- E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+ E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);
/* Configure for OS presence */
em_init_manageability(hw);
*/
rx_buf_size = em_get_rx_buffer_size(hw);
- hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+ hw->fc.high_water = rx_buf_size -
+ PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
hw->fc.low_water = hw->fc.high_water - 1500;
if (hw->mac.type == e1000_80003es2lan)
return 0x1000;
/* Adapters that do not support jumbo frames */
case e1000_ich8lan:
- return ETHER_MAX_LEN;
+ return RTE_ETHER_MAX_LEN;
default:
return MAX_JUMBO_FRAME_SIZE;
}
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
- uint8_t addr[ETHER_ADDR_LEN];
+ uint8_t addr[RTE_ETHER_ADDR_LEN];
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
memset(addr, 0, sizeof(addr));
uint32_t rctl;
eth_em_infos_get(dev, &dev_info);
- frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+ VLAN_TAG_SIZE;
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > ETHER_MAX_LEN) {
+ if (frame_size > RTE_ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
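This threshold test recurs in nearly every PMD touched by the rename: frames longer than RTE_ETHER_MAX_LEN (1518 bytes) need the jumbo offload, shorter ones must clear it. A condensed sketch of the shared pattern (hypothetical helper, not a DPDK API):

#include <rte_ethdev.h>

static void set_jumbo_flag_sketch(struct rte_eth_dev *dev, uint32_t frame_size)
{
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
}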
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (data_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (data_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len = (uint16_t)
(last_seg->data_len -
- (ETHER_CRC_LEN - data_len));
+ (RTE_ETHER_CRC_LEN - data_len));
last_seg->next = NULL;
} else
- rxm->data_len =
- (uint16_t) (data_len - ETHER_CRC_LEN);
+ rxm->data_len = (uint16_t)
+ (data_len - RTE_ETHER_CRC_LEN);
}
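When DEV_RX_OFFLOAD_KEEP_CRC leaves the 4-byte CRC on the wire data, those bytes can straddle the final two segments, which is why the code above may free the last mbuf outright. A standalone sketch of the trim, with the same first/previous/current segment roles as in the loop above:

#include <rte_ether.h>
#include <rte_mbuf.h>

/* Illustration: strip a trailing CRC from a multi-segment packet.
 * rxm is the final segment (data_len bytes), prev the one before it. */
static void trim_crc_sketch(struct rte_mbuf *first, struct rte_mbuf *prev,
			    struct rte_mbuf *rxm, uint16_t data_len)
{
	first->pkt_len -= RTE_ETHER_CRC_LEN;
	if (data_len <= RTE_ETHER_CRC_LEN) {
		/* CRC spills into the previous segment: drop the last
		 * mbuf and shave the remainder off its predecessor. */
		rte_pktmbuf_free_seg(rxm);
		first->nb_segs--;
		prev->data_len -= RTE_ETHER_CRC_LEN - data_len;
		prev->next = NULL;
	} else {
		rxm->data_len = data_len - RTE_ETHER_CRC_LEN;
	}
}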
/*
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
- if (max_rx_pktlen > ETHER_MAX_LEN)
+ if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
* call to configure
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
* one buffer.
*/
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
- rctl_bsize < ETHER_MAX_LEN) {
+ rctl_bsize < RTE_ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst =
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("e1000",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
error = -ENOMEM;
goto err_late;
}
diag = hw->mac.ops.reset_hw(hw);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC "
"addresses",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
return -ENOMEM;
}
}
adapter->stopped = 0;
- E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+ E1000_WRITE_REG(hw, E1000_VET,
+ RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
*/
rx_buf_size = igb_get_rx_buffer_size(hw);
- hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+ hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
hw->fc.low_water = hw->fc.high_water - 1500;
hw->fc.pause_time = IGB_FC_PAUSE_TIME;
hw->fc.send_xon = 1;
if (diag < 0)
return diag;
- E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+ E1000_WRITE_REG(hw, E1000_VET,
+ RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
e1000_get_phy_info(hw);
e1000_check_for_link(hw);
/* Workaround CRC bytes included in size, take away 4 bytes/packet */
stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
- stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
+ stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
- stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
+ stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
stats->ruc += E1000_READ_REG(hw, E1000_RUC);
stats->tor += E1000_READ_REG(hw, E1000_TORL);
stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
- stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
+ stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
stats->tot += E1000_READ_REG(hw, E1000_TOTL);
stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
- stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
+ stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
- stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
+ stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
- stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
+ stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
ETH_LINK_SPEED_1G;
dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
}
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
- uint8_t addr[ETHER_ADDR_LEN];
+ uint8_t addr[RTE_ETHER_ADDR_LEN];
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
memset(addr, 0, sizeof(addr));
eth_igb_infos_get(dev, &dev_info);
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) ||
- (frame_size > dev_info.max_rx_pktlen))
+ if (mtu < RTE_ETHER_MIN_MTU ||
+ frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > ETHER_MAX_LEN) {
+ if (frame_size > RTE_ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
uint32_t etqf = 0;
int ret;
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
" ethertype filter.", filter->ether_type);
return -EINVAL;
/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
- (ETHER_TYPE_1588 |
+ (RTE_ETHER_TYPE_1588 |
E1000_ETQF_FILTER_ENABLE |
E1000_ETQF_1588));
}
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
static inline
int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
- unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
struct e1000_vf_info *vfinfo =
*E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
uint16_t vfn;
rte_eth_random_addr(vf_mac_addr);
/* keep the random address as default */
memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
return 0;
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
e1000_write_mbx(hw, msgbuf, 3, vf);
return 0;
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK;
- uint32_t max_frame = rlpml + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t max_frame = rlpml + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
uint32_t vmolr;
- if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ if (max_frame < RTE_ETHER_MIN_LEN ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -1;
vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (data_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (data_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len = (uint16_t)
(last_seg->data_len -
- (ETHER_CRC_LEN - data_len));
+ (RTE_ETHER_CRC_LEN - data_len));
last_seg->next = NULL;
} else
- rxm->data_len =
- (uint16_t) (data_len - ETHER_CRC_LEN);
+ rxm->data_len = (uint16_t)
+ (data_len - RTE_ETHER_CRC_LEN);
}
/*
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
* call to configure
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
int id_number;
char name[ENA_NAME_MAX_LEN];
- u8 mac_addr[ETHER_ADDR_LEN];
+ u8 mac_addr[RTE_ETHER_ADDR_LEN];
void *regs;
void *dev_mem_base;
};
struct enetc_eth_mac_info {
- uint8_t addr[ETHER_ADDR_LEN];
- uint8_t perm_addr[ETHER_ADDR_LEN];
+ uint8_t addr[RTE_ETHER_ADDR_LEN];
+ uint8_t perm_addr[RTE_ETHER_ADDR_LEN];
uint8_t get_link_status;
};
}
rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
- ETHER_CRC_LEN : 0);
+ RTE_ETHER_CRC_LEN : 0);
return 0;
fail:
struct enetc_eth_hw *hw =
ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct enetc_hw *enetc_hw = &hw->hw;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
/* check that mtu is within the allowed range */
if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
return -EINVAL;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
2 * ENETC_MAC_MAXFRM_SIZE);
- dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
+ dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
}
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
+ RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * 1);
+ RTE_ETHER_ADDR_LEN * 1);
error = -ENOMEM;
return -1;
}
/* Set MTU */
enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ETHER_MAX_LEN));
- eth_dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
+ eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
/* ethdev max size includes eth whereas NIC MTU does not */
- return mtu + ETHER_HDR_LEN;
+ return mtu + RTE_ETHER_HDR_LEN;
}
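For scale: enic adds back only the 14-byte Ethernet header, so the default 1500-byte MTU corresponds to a 1514-byte ethdev frame. A trivial usage sketch:

static uint32_t default_max_pktlen(void)
{
	/* 1500 + RTE_ETHER_HDR_LEN (14) = 1514 */
	return enic_mtu_to_max_rx_pktlen(1500);
}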
/* Get the CQ index from a Start of Packet(SOP) RQ index */
static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
{
- char mac_str[ETHER_ADDR_FMT_SIZE];
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
PMD_INIT_LOG(DEBUG, " %s address %s\n",
add ? "add" : "remove", mac_str);
}
uint32_t nb_mc_addr)
{
struct enic *enic = pmd_priv(eth_dev);
- char mac_str[ETHER_ADDR_FMT_SIZE];
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
struct rte_ether_addr *addr;
uint32_t i, j;
int ret;
if (!rte_is_multicast_ether_addr(addr) ||
rte_is_broadcast_ether_addr(addr)) {
rte_ether_format_addr(mac_str,
- ETHER_ADDR_FMT_SIZE, addr);
+ RTE_ETHER_ADDR_FMT_SIZE, addr);
PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
mac_str);
return -EINVAL;
arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
- eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
+ eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
}
static int
arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct ipv4_hdr),
- arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
+ arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4), 2);
}
static int
arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct ipv6_hdr),
- arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
+ arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6), 2);
}
static int
mask = &rte_flow_item_eth_mask;
memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
enic_spec.ether_type = spec->type;
enic_mask.ether_type = mask->type;
* and will be 0 for legacy firmware and VICs
*/
if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
- enic->max_mtu = c->max_pkt_size - ETHER_HDR_LEN;
+ enic->max_mtu = c->max_pkt_size - RTE_ETHER_HDR_LEN;
else
- enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - ETHER_HDR_LEN;
+ enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE -
+ RTE_ETHER_HDR_LEN;
if (c->mtu == 0)
c->mtu = 1500;
&ea->addr_bytes[0], &ea->addr_bytes[1],
&ea->addr_bytes[2], &ea->addr_bytes[3],
&ea->addr_bytes[4], &ea->addr_bytes[5]);
- return ret != ETHER_ADDR_LEN;
+ return ret != RTE_ETHER_ADDR_LEN;
}
int
ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
PRIV(dev)->mac_addr_pool[i]);
if (ret) {
- char ea_fmt[ETHER_ADDR_FMT_SIZE];
+ char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
+ rte_ether_format_addr(ea_fmt,
+ RTE_ETHER_ADDR_FMT_SIZE, ea);
ERROR("Adding MAC address %s failed", ea_fmt);
return ret;
}
/* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */
if (RTE_ALIGN(addr, 8) == addr) {
boundary1 = RTE_ALIGN_FLOOR(addr, 4096);
- boundary2 = RTE_ALIGN_FLOOR(addr + ETHER_MAX_VLAN_FRAME_LEN,
+ boundary2 = RTE_ALIGN_FLOOR(addr + RTE_ETHER_MAX_VLAN_FRAME_LEN,
4096);
if (boundary1 == boundary2)
return 1;
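The two RTE_ALIGN_FLOOR calls round the buffer start and the end of a maximum VLAN frame (1522 bytes) down to their containing 4 KiB pages; the buffer is safe exactly when both land in the same page (the 8-byte alignment was already checked above). An illustrative restatement:

#include <stdint.h>
#include <rte_common.h>
#include <rte_ether.h>

/* e.g. addr = 0x1200: 0x1200 + 1522 = 0x17F2, both floor to 0x1000 -> 1;
 * addr = 0x0F00: 0x0F00 + 1522 = 0x14F2, pages 0x0000 vs 0x1000 -> 0. */
static int frame_fits_in_page(uintptr_t addr)
{
	return RTE_ALIGN_FLOOR(addr, 4096) ==
	       RTE_ALIGN_FLOOR(addr + RTE_ETHER_MAX_VLAN_FRAME_LEN, 4096);
}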
/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
memset(dev->data->mac_addrs, 0,
- ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
+ RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
&dev->data->mac_addrs[0]);
memset(macvlan, 0, sizeof(*macvlan));
/* Initialize MAC address(es) */
dev->data->mac_addrs = rte_zmalloc("fm10k",
- ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
+ RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
if (dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
return -ENOMEM;
/* Set the global registers with default ether type value */
if (!pf->support_multi_driver) {
ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
- ETHER_TYPE_VLAN);
+ RTE_ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
PMD_INIT_LOG(ERR,
"Failed to set the default outer "
}
if (!vsi->max_macaddrs)
- len = ETHER_ADDR_LEN;
+ len = RTE_ETHER_ADDR_LEN;
else
- len = ETHER_ADDR_LEN * vsi->max_macaddrs;
+ len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
&nes->rx_broadcast);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
- nes->rx_broadcast) * ETHER_CRC_LEN;
+ nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
pf->internal_stats.rx_multicast +
- pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
+ pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
/* Workaround: CRC size should not be included in byte statistics,
- * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
+ * packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
/* exclude internal rx bytes
* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
/* exclude internal tx bytes
* Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
- ETHER_TYPE_VLAN);
+ RTE_ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
- ETHER_TYPE_VLAN);
+ RTE_ETHER_TYPE_VLAN);
}
else
i40e_vsi_config_double_vlan(vsi, FALSE);
return -EINVAL;
}
- rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
}
if (add) {
- rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+ rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
mac_filter.filter_type = filter->filter_type;
ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
rte_ether_addr_copy(new_mac, &pf->dev_addr);
} else {
rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
}
/* MAC/VLAN configuration */
- rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
int mac_num;
int ret = I40E_SUCCESS;
- if (!vsi || vlan > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
return I40E_ERR_PARAM;
/* If it's already set, just return */
* Vlan 0 is the generic filter for untagged packets
* and can't be removed.
*/
- if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
return I40E_ERR_PARAM;
/* If can't find it, just return */
return -EINVAL;
}
- if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
+ if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
return -EINVAL;
}
i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
struct i40e_ethertype_filter *filter)
{
- rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
+ RTE_ETHER_ADDR_LEN);
filter->input.ether_type = input->ether_type;
filter->flags = input->flags;
filter->queue = input->queue;
PMD_DRV_LOG(ERR, "Invalid queue ID");
return -EINVAL;
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR,
"unsupported ether_type(0x%04x) in control packet filter.",
filter->ether_type);
return -EINVAL;
}
- if (filter->ether_type == ETHER_TYPE_VLAN)
+ if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
PMD_DRV_LOG(WARNING,
"filter vlan ether_type in first tag is not supported.");
int ret = 0;
/* check if mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is start */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
* Considering QinQ packet, the VLAN tag needs to be counted twice.
*/
#define I40E_ETH_OVERHEAD \
- (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
struct i40e_adapter;
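Counting the VLAN tag twice covers QinQ, so the i40e overhead is 14 + 4 + 2 * 4 = 26 bytes and an MTU of 1500 yields a 1526-byte maximum frame. A compile-time restatement (illustration only; I40E_VLAN_TAG_SIZE is 4):

_Static_assert(1500 + I40E_ETH_OVERHEAD == 1526, "i40e overhead arithmetic");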
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
- 0);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+ 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
" store MAC addresses",
- ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+ RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
return -ENOMEM;
}
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
* Check if the jumbo frame and maximum packet length are set correctly
*/
if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is disabled", (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "frame is disabled",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
int ret = 0;
/* check if mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is start */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
#endif
rx_ctx.dtype = i40e_header_split_none;
rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
- rx_ctx.rxmax = ETHER_MAX_LEN;
+ rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
rx_ctx.tphrdesc_ena = 1;
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
case RTE_ETH_FLOW_FRAG_IPV4:
ip = (struct ipv4_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
/* set length to the default value */
ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
case RTE_ETH_FLOW_FRAG_IPV6:
ip6 = (struct ipv6_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
(fdir_input->flow.ipv6_flow.tc <<
* starts after the whole ARP header
*/
if (fdir_input->flow.l2_flow.ether_type ==
- rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
payload += sizeof(struct rte_arp_hdr);
set_idx = I40E_FLXPLD_L2_IDX;
break;
is_customized_pctype) {
ip = (struct ipv4_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
/* set length to the default value */
ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
ip6 = (struct ipv6_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
(fdir_input->flow.ipv6_flow.tc <<
* starts after the whole ARP header
*/
if (fdir_input->flow.l2_flow.ether_type ==
- rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
payload += sizeof(struct rte_arp_hdr);
set_idx = I40E_FLXPLD_L2_IDX;
} else if (fdir_input->flow_ext.customized_pctype) {
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6 ||
- filter->ether_type == ETHER_TYPE_LLDP ||
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6 ||
+ filter->ether_type == RTE_ETHER_TYPE_LLDP ||
filter->ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
ether_type = rte_be_to_cpu_16(eth_spec->type);
if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
- ether_type == ETHER_TYPE_IPv4 ||
- ether_type == ETHER_TYPE_IPv6 ||
- ether_type == ETHER_TYPE_ARP ||
+ ether_type == RTE_ETHER_TYPE_IPv4 ||
+ ether_type == RTE_ETHER_TYPE_IPv6 ||
+ ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
ether_type =
rte_be_to_cpu_16(vlan_spec->inner_type);
- if (ether_type == ETHER_TYPE_IPv4 ||
- ether_type == ETHER_TYPE_IPv6 ||
- ether_type == ETHER_TYPE_ARP ||
+ if (ether_type == RTE_ETHER_TYPE_IPv4 ||
+ ether_type == RTE_ETHER_TYPE_IPv6 ||
+ ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_IMAC;
}
}
if (!nvgre_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_IMAC;
}
}
for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
- rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_add_mac(vf->vsi, &filter)) {
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (rx_packet_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len =
(uint16_t)(last_seg->data_len -
- (ETHER_CRC_LEN - rx_packet_len));
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
last_seg->next = NULL;
} else
rxm->data_len = (uint16_t)(rx_packet_len -
- ETHER_CRC_LEN);
+ RTE_ETHER_CRC_LEN);
}
first_seg->port = rxq->port_id;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u,"
"as jumbo frame is enabled",
- (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
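Editor's note: this validation pattern recurs in the iavf and ice hunks below. With DEV_RX_OFFLOAD_JUMBO_FRAME set, max_pkt_len must exceed the standard 1518-byte RTE_ETHER_MAX_LEN and stay within the device limit; without it, it must fit the classic [RTE_ETHER_MIN_LEN, RTE_ETHER_MAX_LEN] window. A standalone sketch (rx_pkt_len_valid and frame_max are illustrative; frame_max stands in for I40E_FRAME_SIZE_MAX and friends):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ether.h>

static bool
rx_pkt_len_valid(uint32_t max_pkt_len, uint32_t frame_max, bool jumbo)
{
    if (jumbo) /* must be a real jumbo frame, within the NIC limit */
        return max_pkt_len > RTE_ETHER_MAX_LEN &&
               max_pkt_len <= frame_max;
    /* standard frames: 64..1518 bytes on the wire */
    return max_pkt_len >= RTE_ETHER_MIN_LEN &&
           max_pkt_len <= RTE_ETHER_MAX_LEN;
}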
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
- if (vlan_id > ETHER_MAX_VLAN_ID) {
+ if (vlan_id > RTE_ETHER_MAX_VLAN_ID) {
PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
return -EINVAL;
}
}
if (on) {
- rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
} else {
if (!is_i40e_supported(dev))
return -ENOTSUP;
- if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
+ if (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) {
PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
return -EINVAL;
}
*/
#define IAVF_VLAN_TAG_SIZE 4
#define IAVF_ETH_OVERHEAD \
- (ETHER_HDR_LEN + ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
#define IAVF_32_BIT_WIDTH (CHAR_BIT * 4)
#define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
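Editor's note: IAVF_ETH_OVERHEAD counts the VLAN tag twice so that a QinQ frame at the configured MTU still fits. With RTE_ETHER_HDR_LEN = 14 and RTE_ETHER_CRC_LEN = 4 the overhead is 26 bytes; for example:

#include <stdint.h>
#include <rte_ether.h>

#define IAVF_VLAN_TAG_SIZE 4
#define IAVF_ETH_OVERHEAD \
    (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)

/* 1500-byte MTU -> 1500 + 14 + 4 + 2 * 4 = 1526 bytes on the wire */
static const uint32_t frame_size_example = RTE_ETHER_MTU + IAVF_ETH_OVERHEAD;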
* correctly.
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (max_pkt_len <= ETHER_MAX_LEN ||
+ if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
max_pkt_len > IAVF_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is enabled",
- (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)IAVF_FRAME_SIZE_MAX);
return -EINVAL;
}
} else {
- if (max_pkt_len < ETHER_MIN_LEN ||
- max_pkt_len > ETHER_MAX_LEN) {
+ if (max_pkt_len < RTE_ETHER_MIN_LEN ||
+ max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return -EINVAL;
}
}
uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
int ret = 0;
- if (mtu < ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is started */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
/* copy mac addr */
eth_dev->data->mac_addrs = rte_zmalloc(
- "iavf_mac",
- ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX,
- 0);
+ "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
if (!eth_dev->data->mac_addrs) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
" store MAC addresses",
- ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
+ RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
return -ENOMEM;
}
/* If the MAC address is not configured by host,
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (rx_packet_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len =
(uint16_t)(last_seg->data_len -
- (ETHER_CRC_LEN - rx_packet_len));
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
last_seg->next = NULL;
} else
rxm->data_len = (uint16_t)(rx_packet_len -
- ETHER_CRC_LEN);
+ RTE_ETHER_CRC_LEN);
}
first_seg->port = rxq->port_id;
struct ice_hw *hw;
int ret = 0;
- if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;
hw = ICE_VSI_TO_HW(vsi);
* Vlan 0 is the generic filter for untagged packets
* and can't be removed.
*/
- if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;
hw = ICE_VSI_TO_HW(vsi);
hw->port_info->mac.perm_addr,
ETH_ADDR_LEN);
- rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
ret = ice_add_mac_filter(vsi, &mac_addr);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
- rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
ret = ice_add_mac_filter(vsi, &mac_addr);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to add MAC filter");
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
/* check if mtu is within the allowed range */
- if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is started */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
&nes->rx_broadcast);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
- nes->rx_broadcast) * ETHER_CRC_LEN;
+ nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
&ns->eth.rx_discards);
/* Workaround: CRC size should not be included in byte statistics,
- * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
+ * packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
/* GLPRT_REPC not supported */
/* GLPRT_RMPC not supported */
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
/* GLPRT_TEPC not supported */
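Editor's note: the hardware byte counters read above include the CRC of every frame, hence the subtraction of RTE_ETHER_CRC_LEN once per counted packet. A one-line helper capturing the workaround (bytes_without_crc is illustrative):

#include <stdint.h>
#include <rte_ether.h>

static uint64_t
bytes_without_crc(uint64_t bytes, uint64_t pkts)
{
    /* remove the 4 CRC bytes the registers counted for each frame */
    return bytes - pkts * RTE_ETHER_CRC_LEN;
}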
* Considering QinQ packet, the VLAN tag needs to be counted twice.
*/
#define ICE_ETH_OVERHEAD \
- (ETHER_HDR_LEN + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
struct ice_adapter;
dev->data->dev_conf.rxmode.max_rx_pkt_len);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u,"
"as jumbo frame is enabled",
- (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)ICE_FRAME_SIZE_MAX);
return -EINVAL;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return -EINVAL;
}
}
rxq->reg_idx = vsi->base_queue + queue_idx;
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (rx_packet_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len =
(uint16_t)(last_seg->data_len -
- (ETHER_CRC_LEN - rx_packet_len));
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
last_seg->next = NULL;
} else
rxm->data_len = (uint16_t)(rx_packet_len -
- ETHER_CRC_LEN);
+ RTE_ETHER_CRC_LEN);
}
first_seg->port = rxq->port_id;
* Considering QinQ packet, the VLAN tag needs to be counted twice.
*/
#define IPN3KE_ETH_OVERHEAD \
- (ETHER_HDR_LEN + ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2)
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2)
#define IPN3KE_MAC_FRAME_SIZE_MAX 9728
#define IPN3KE_MAC_RX_FRAME_MAXLENGTH 0x00AE
rte_memcpy(&parser->key[0],
eth->src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
rte_memcpy(parser->key,
eth->src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
break;
default:
&rpst->mac_addr);
rte_ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);
- dev->data->mac_addrs->addr_bytes[ETHER_ADDR_LEN - 1] =
+ dev->data->mac_addrs->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
(uint8_t)rpst->port_id + 1;
if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;
/* check if mtu is within the allowed range */
- if (mtu < ETHER_MIN_MTU ||
+ if (mtu < RTE_ETHER_MIN_MTU ||
frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
return -EINVAL;
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
else
rpst->i40e_pf_eth = NULL;
rpst->i40e_pf_eth_port_id = 0xFFFF;
- ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", ETHER_ADDR_LEN, 0);
+ ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", RTE_ETHER_ADDR_LEN, 0);
if (!ethdev->data->mac_addrs) {
IPN3KE_AFU_PMD_ERR("Failed to "
"allocated memory for storing mac address");
ethdev->data->nb_tx_queues = 1;
ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor",
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
0);
if (!ethdev->data->mac_addrs) {
IPN3KE_AFU_PMD_ERR("Failed to "
ixgbe_reset_qstat_mappings(hw);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
ð_dev->data->mac_addrs[0]);
/* Allocate memory for storing hash filter MAC addresses */
- eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ eth_dev->data->hash_mac_addrs = rte_zmalloc(
+ "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
}
l2_tn_info->e_tag_en = FALSE;
l2_tn_info->e_tag_fwd_en = FALSE;
- l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
+ l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
return 0;
}
mac_addr->addr_bytes[1] = 0x09;
mac_addr->addr_bytes[2] = 0xC0;
/* Force indication of locally assigned MAC address. */
- mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
/* Generate the last 3 bytes of the MAC address with a random number. */
random = rte_rand();
memcpy(&mac_addr->addr_bytes[3], &random, 3);
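Editor's note: the address built here keeps a fixed three-byte prefix, forces the locally-administered bit (RTE_ETHER_LOCAL_ADMIN_ADDR, 0x02) in the first octet, and randomizes the tail. A self-contained sketch of the same construction (gen_local_mac is illustrative):

#include <string.h>
#include <rte_ether.h>
#include <rte_random.h>

static void
gen_local_mac(struct rte_ether_addr *mac_addr)
{
    uint64_t random;

    mac_addr->addr_bytes[0] = 0x00 | RTE_ETHER_LOCAL_ADMIN_ADDR;
    mac_addr->addr_bytes[1] = 0x09;
    mac_addr->addr_bytes[2] = 0xC0;
    /* last 3 bytes come from a PRNG, as in the hunk above */
    random = rte_rand();
    memcpy(&mac_addr->addr_bytes[3], &random, 3);
}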
ixgbevf_get_queues(hw, &tcs, &tc);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
hw_stats->qbrc[i] +=
((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
if (crc_strip == 0)
- hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+ hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
hw_stats->qbtc[i] +=
hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
if (crc_strip == 0)
- hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+ hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
hw_stats->gptc += delta_gptc;
- hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
- hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
+ hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
+ hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
/*
* Workaround: mprc hardware is incorrectly counting
hw_stats->gptc -= total;
hw_stats->mptc -= total;
hw_stats->ptc64 -= total;
- hw_stats->gotc -= total * ETHER_MIN_LEN;
+ hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
* At least reserve one Ethernet frame for watermark
* high_water/low_water in kilo bytes for ixgbe
*/
- max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ max_high_water = (rx_buf_size -
+ RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
* At least reserve one Ethernet frame for watermark
* high_water/low_water in kilo bytes for ixgbe
*/
- max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ max_high_water = (rx_buf_size -
+ RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
if ((pfc_conf->fc.high_water > max_high_water) ||
(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
ixgbe_dev_info_get(dev, &dev_info);
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
/* If device is started, refuse mtu that requires the support of
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* switch to jumbo mode if needed */
- if (frame_size > ETHER_MAX_LEN) {
+ if (frame_size > RTE_ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ if (mtu < RTE_ETHER_MIN_MTU ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -EINVAL;
/* If device is started, refuse mtu that requires the support of
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
" ethertype filter.", filter->ether_type);
return -EINVAL;
/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
- (ETHER_TYPE_1588 |
+ (RTE_ETHER_TYPE_1588 |
IXGBE_ETQF_FILTER_EN |
IXGBE_ETQF_1588));
#define IXGBE_5TUPLE_MIN_PRI 1
/* The overhead from MTU to max frame size. */
-#define IXGBE_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN)
+#define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/
#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
};
struct ixgbe_vf_info {
- uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
uint16_t num_vf_mc_hashes;
uint16_t default_vf_vlan_id;
return -rte_errno;
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
* src MAC address must be masked,
* and partial dst MAC address masking is not supported
* (each dst mask byte must be 0xFF).
*/
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j] ||
eth_mask->dst.addr_bytes[j] != 0xFF) {
memset(rule, 0,
}
/* src MAC address should be masked. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
}
}
rule->mask.mac_addr_byte_mask = 0;
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
/* It's a per byte mask. */
if (eth_mask->dst.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
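Editor's note: mac_addr_byte_mask is a per-byte bitmap; bit j is set when byte j of the destination MAC must match exactly (mask byte 0xFF). A sketch of the fold (the surrounding code additionally rejects mask bytes that are neither 0x00 nor 0xFF):

#include <stdint.h>
#include <rte_ether.h>

static uint8_t
mac_mask_to_bitmap(const uint8_t mask[RTE_ETHER_ADDR_LEN])
{
    uint8_t bm = 0;
    int j;

    for (j = 0; j < RTE_ETHER_ADDR_LEN; j++)
        if (mask[j] == 0xFF)    /* byte j participates in the match */
            bm |= (uint8_t)(0x1 << j);
    return bm;
}

/* e.g. a mask of ff:ff:ff:00:00:00 yields a bitmap of 0x07 */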
eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
static inline
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
- unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
uint16_t vfn;
rte_eth_random_addr(vf_mac_addr);
/* keep the random address as default */
memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
return 0;
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t new_mtu = msgbuf[1];
uint32_t max_frs;
- int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
/* X540 and X550 support jumbo frames in IOV mode */
if (hw->mac.type != ixgbe_mac_X540 &&
hw->mac.type != ixgbe_mac_X550EM_a)
return -1;
- if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ if (max_frame < RTE_ETHER_MIN_LEN ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -1;
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
struct ixgbe_dcb_tc_config *tc;
- uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_bw_conf *bw_conf =
* call to configure.
*/
if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
if (rte_is_valid_assigned_ether_addr(
(struct rte_ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
IXGBE_RAH_AV);
}
if (vf >= pci_dev->max_vfs)
return -EINVAL;
- if (vlan_id > ETHER_MAX_VLAN_ID)
+ if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
- if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ if (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0)
return -EINVAL;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
#define MAX_KNI_PORTS 8
#define KNI_ETHER_MTU(mbuf_size) \
- ((mbuf_size) - ETHER_HDR_LEN) /**< Ethernet MTU. */
+ ((mbuf_size) - RTE_ETHER_HDR_LEN) /**< Ethernet MTU. */
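Editor's note: KNI sizes the interface MTU so that one full Ethernet frame fits a single mbuf data room; for example:

#include <stdint.h>
#include <rte_ether.h>

#define KNI_ETHER_MTU(mbuf_size) ((mbuf_size) - RTE_ETHER_HDR_LEN)

/* a 2048-byte data room gives 2048 - 14 = 2034 bytes of MTU */
static const uint16_t kni_mtu_example = KNI_ETHER_MTU(2048);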
#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread"
static const char * const valid_arguments[] = {
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
- uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
struct lio_dev_ctrl_cmd ctrl_cmd;
struct lio_ctrl_pkt ctrl_pkt;
/* check if VF MTU is within allowed range.
* New value should not exceed PF MTU.
*/
- if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
+ if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
- ETHER_MIN_MTU, pf_mtu);
+ RTE_ETHER_MIN_MTU, pf_mtu);
return -EINVAL;
}
return -1;
}
- if (frame_len > ETHER_MAX_LEN)
+ if (frame_len > RTE_ETHER_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
goto dev_mtu_set_error;
}
- mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
- if (mtu < ETHER_MIN_MTU)
- mtu = ETHER_MIN_MTU;
+ mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
+ if (mtu < RTE_ETHER_MIN_MTU)
+ mtu = RTE_ETHER_MIN_MTU;
if (eth_dev->data->mtu != mtu) {
ret = lio_dev_mtu_set(eth_dev, mtu);
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
int retval, num_iqueues, num_oqueues;
- uint8_t mac[ETHER_ADDR_LEN], i;
+ uint8_t mac[RTE_ETHER_ADDR_LEN], i;
struct lio_if_cfg_resp *resp;
struct lio_soft_command *sc;
union lio_if_cfg if_cfg;
/* 64-bit swap required on LE machines */
lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
2 + i));
}
eth_dev->dev_ops = &liovf_eth_dev_ops;
- eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
lio_dev_err(lio_dev,
"MAC addresses memory allocation failed\n");
priv->device_attr = device_attr;
priv->port = port;
priv->pd = pd;
- priv->mtu = ETHER_MTU;
+ priv->mtu = RTE_ETHER_MTU;
priv->vf = vf;
priv->hw_csum = !!(device_attr.device_cap_flags &
IBV_DEVICE_RAW_IP_CSUM);
/* mlx4_ethdev.c */
int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);
-int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
int mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu);
int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request);
if (ret)
return ret;
- memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
return 0;
}
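Editor's note: mlx4_get_mac() (and mlx5_get_mac() below) fetch the port MAC from the kernel netdevice via a SIOCGIFHWADDR ioctl, copying RTE_ETHER_ADDR_LEN bytes out of ifr_hwaddr.sa_data. A minimal Linux-only equivalent without the PMD plumbing (get_netdev_mac is illustrative):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static int
get_netdev_mac(const char *ifname, uint8_t mac[6])
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int ret;

    if (fd < 0)
        return -1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    ret = ioctl(fd, SIOCGIFHWADDR, &ifr);   /* kernel fills sa_data */
    close(fd);
    if (ret < 0)
        return -1;
    memcpy(mac, ifr.ifr_hwaddr.sa_data, 6);
    return 0;
}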
goto error;
}
flow->allmulti = 1;
- } else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
+ } else if (sum_dst != (UINT8_C(0xff) * RTE_ETHER_ADDR_LEN)) {
msg = "mlx4 does not support matching partial"
" Ethernet fields";
goto error;
flow->promisc = 1;
return 0;
}
- memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->val.dst_mac, spec->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
+ memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
/* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
- }
+
return 0;
error:
return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
pkt->ol_flags = PKT_RX_RSS_HASH;
pkt->hash.rss = cqe->immed_rss_invalid;
if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
+ len -= RTE_ETHER_CRC_LEN;
pkt->pkt_len = len;
if (rxq->csum | rxq->csum_l2tun) {
uint32_t flags =
}
priv->sh = sh;
priv->ibv_port = spawn->ibv_port;
- priv->mtu = ETHER_MTU;
+ priv->mtu = RTE_ETHER_MTU;
#ifndef RTE_ARCH_64
/* Initialize UAR access locks for 32bit implementations. */
rte_spinlock_init(&priv->uar_lock_cq);
/* mlx5_mac.c */
-int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
uint32_t index, uint32_t vmdq);
(void *)items->type,
"eth header not found");
if (!eth->ether_type)
- eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
+ eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ipv4 = (struct ipv4_hdr *)&buf[temp_size];
"neither eth nor vlan"
" header found");
if (vlan && !vlan->eth_proto)
- vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
+ vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv4);
else if (eth && !eth->ether_type)
- eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
+ eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv4);
if (!ipv4->version_ihl)
ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
MLX5_ENCAP_IPV4_IHL_MIN;
"neither eth nor vlan"
" header found");
if (vlan && !vlan->eth_proto)
- vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
+ vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv6);
else if (eth && !eth->ether_type)
- eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
+ eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv6);
if (!ipv6->vtc_flow)
ipv6->vtc_flow =
RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
case MLX5_FLOW_LAYER_GRE:
MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- ETHER_TYPE_MPLS);
+ RTE_ETHER_TYPE_MPLS);
break;
default:
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
memcpy(&p_parser->keys[idx].val,
conf->mac_addr + SZ_PEDIT_KEY_VAL,
- ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
+ RTE_ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
p_parser->sel.nkeys = (++idx);
}
flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
- keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+ keys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_MAC_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
- keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+ keys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_MAC_DST;
break;
default:
case RTE_FLOW_ITEM_TYPE_PORT_ID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
- size += SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
+ size += SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) * 4;
/* dst/src MAC addr and mask. */
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
}
if (!rte_is_zero_ether_addr(&mask.eth->dst)) {
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
spec.eth->dst.addr_bytes);
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
mask.eth->dst.addr_bytes);
}
if (!rte_is_zero_ether_addr(&mask.eth->src)) {
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
spec.eth->src.addr_bytes);
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
mask.eth->src.addr_bytes);
}
assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
/* Neigh rule with permanent attribute found. */
size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
MNL_ALIGN(sizeof(struct ndmsg)) +
- SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
+ SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) +
(family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
: SZ_NLATTR_TYPE_OF(uint32_t));
cmd = flow_tcf_alloc_nlcmd(ctx, size);
mnl_attr_put(cmd, NDA_DST, IPV6_ADDR_LEN,
mnl_attr_get_payload(na_ip));
}
- mnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,
+ mnl_attr_put(cmd, NDA_LLADDR, RTE_ETHER_ADDR_LEN,
mnl_attr_get_payload(na_mac));
assert(size == cmd->nlmsg_len);
return 1;
if (spec) {
unsigned int i;
- memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(ð.val.dst_mac, spec->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(ð.val.src_mac, spec->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.val.ether_type = spec->type;
- memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(ð.mask.dst_mac, mask->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(ð.mask.src_mac, mask->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.mask.ether_type = mask->type;
/* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
eth.val.src_mac[i] &= eth.mask.src_mac[i];
}
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
{
struct ifreq request;
int ret;
ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
if (ret)
return ret;
- memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
return 0;
}
DRV_LOG(DEBUG, "bridge MAC address %s", m);
#endif
memcpy(&(*data->mac)[data->mac_n++],
- RTA_DATA(attribute), ETHER_ADDR_LEN);
+ RTA_DATA(attribute), RTE_ETHER_ADDR_LEN);
}
}
return 0;
struct nlmsghdr hdr;
struct ndmsg ndm;
struct rtattr rta;
- uint8_t buffer[ETHER_ADDR_LEN];
+ uint8_t buffer[RTE_ETHER_ADDR_LEN];
} req = {
.hdr = {
.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
},
.rta = {
.rta_type = NDA_LLADDR,
- .rta_len = RTA_LENGTH(ETHER_ADDR_LEN),
+ .rta_len = RTA_LENGTH(RTE_ETHER_ADDR_LEN),
},
};
int fd;
if (priv->nl_socket_route == -1)
return 0;
fd = priv->nl_socket_route;
- memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN);
+ memcpy(RTA_DATA(&req.rta), mac, RTE_ETHER_ADDR_LEN);
req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
RTA_ALIGN(req.rta.rta_len);
ret = mlx5_nl_send(fd, &req.hdr, sn);
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
buf->vlan_tci);
- unsigned int len = 2 * ETHER_ADDR_LEN - 2;
+ unsigned int len = 2 * RTE_ETHER_ADDR_LEN - 2;
addr += 2;
length -= 2;
mcqe->rx_hash_result);
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
+ len -= RTE_ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
}
DATA_LEN(rep) = DATA_LEN(seg);
len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
assert((int)len >= (rxq->crc_present << 2));
if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
+ len -= RTE_ETHER_CRC_LEN;
offset = strd_idx * strd_sz + strd_shift;
addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
/* Initialize the offload flag. */
};
/* Restore the compressed count. Must be 16 bits. */
const uint16_t mcqe_n = t_pkt->data_len +
- (rxq->crc_present * ETHER_CRC_LEN);
+ (rxq->crc_present * RTE_ETHER_CRC_LEN);
const uint64x2_t rearm =
vld1q_u64((void *)&t_pkt->rearm_data);
const uint32x4_t rxdf_mask = {
vreinterpretq_u8_u32(rxdf_mask));
const uint16x8_t crc_adj = {
0, 0,
- rxq->crc_present * ETHER_CRC_LEN, 0,
- rxq->crc_present * ETHER_CRC_LEN, 0,
+ rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
+ rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
0, 0
};
const uint32_t flow_tag = t_pkt->hash.fdir.hi;
12, 13, 14, -1 /* 1st CQE */
};
const uint16x8_t crc_adj = {
- 0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0
+ 0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0
};
const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
-1, -1, -1, -1 /* skip packet_type */);
/* Restore the compressed count. Must be 16 bits. */
const uint16_t mcqe_n = t_pkt->data_len +
- (rxq->crc_present * ETHER_CRC_LEN);
+ (rxq->crc_present * RTE_ETHER_CRC_LEN);
const __m128i rearm =
_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
const __m128i rxdf =
_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
const __m128i crc_adj =
_mm_set_epi16(0, 0, 0,
- rxq->crc_present * ETHER_CRC_LEN,
+ rxq->crc_present * RTE_ETHER_CRC_LEN,
0,
- rxq->crc_present * ETHER_CRC_LEN,
+ rxq->crc_present * RTE_ETHER_CRC_LEN,
0, 0);
const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
const __m128i ones = _mm_cmpeq_epi32(zero, zero);
const __m128i crc_adj =
_mm_set_epi16(0, 0, 0, 0, 0,
- rxq->crc_present * ETHER_CRC_LEN,
+ rxq->crc_present * RTE_ETHER_CRC_LEN,
0,
- rxq->crc_present * ETHER_CRC_LEN);
+ rxq->crc_present * RTE_ETHER_CRC_LEN);
const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
assert(rxq->sges_n == 0);
continue;
memcpy(&unicast.dst.addr_bytes,
mac->addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
for (j = 0; j != vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
mbuf_data_size, mtu, mru);
}
- if (mtu < ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) {
+ if (mtu < RTE_ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) {
MVNETA_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
return -EINVAL;
}
mvneta_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct mvneta_priv *priv = dev->data->dev_private;
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
int ret;
if (!priv->ppio)
uint32_t index, uint32_t vmdq __rte_unused)
{
struct mvneta_priv *priv = dev->data->dev_private;
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
int ret;
if (index == 0)
ret = neta_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
rte_ether_format_addr(buf, sizeof(buf), mac_addr);
MVNETA_LOG(ERR, "Failed to set mac to %s", buf);
}
eth_dev->data->mac_addrs =
rte_zmalloc("mac_addrs",
- ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0);
+ RTE_ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0);
if (!eth_dev->data->mac_addrs) {
MVNETA_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
goto out_free;
memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
- req.ifr_addr.sa_data, ETHER_ADDR_LEN);
+ req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->device = &vdev->device;
/** Rx queue descriptors alignment in B */
#define MRVL_NETA_RXD_ALIGN 32
-#define MRVL_NETA_VLAN_TAG_LEN 4
-#define MRVL_NETA_ETH_HDRS_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
- MRVL_NETA_VLAN_TAG_LEN)
+#define MRVL_NETA_VLAN_TAG_LEN 4
+#define MRVL_NETA_ETH_HDRS_LEN (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ MRVL_NETA_VLAN_TAG_LEN)
#define MRVL_NETA_HDRS_LEN (MV_MH_SIZE + MRVL_NETA_ETH_HDRS_LEN)
#define MRVL_NETA_MTU_TO_MRU(mtu) ((mtu) + MRVL_NETA_HDRS_LEN)
mbuf_data_size, mtu, mru);
}
- if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
+ if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
return -EINVAL;
}
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct mrvl_priv *priv = dev->data->dev_private;
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
int ret;
if (!priv->ppio)
uint32_t index, uint32_t vmdq __rte_unused)
{
struct mrvl_priv *priv = dev->data->dev_private;
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
int ret;
if (priv->isolated)
ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
- char buf[ETHER_ADDR_FMT_SIZE];
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
rte_ether_format_addr(buf, sizeof(buf), mac_addr);
MRVL_LOG(ERR, "Failed to set mac to %s", buf);
}
eth_dev->data->mac_addrs =
rte_zmalloc("mac_addrs",
- ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
+ RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
if (!eth_dev->data->mac_addrs) {
MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
goto out_free;
memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
- req.ifr_addr.sa_data, ETHER_ADDR_LEN);
+ req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->device = &vdev->device;
/** Minimum number of sent buffers to release from shadow queue to BM */
#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64
-#define MRVL_PP2_VLAN_TAG_LEN 4
-#define MRVL_PP2_ETH_HDRS_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
- (2 * MRVL_PP2_VLAN_TAG_LEN))
+#define MRVL_PP2_VLAN_TAG_LEN 4
+#define MRVL_PP2_ETH_HDRS_LEN (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ (2 * MRVL_PP2_VLAN_TAG_LEN))
#define MRVL_PP2_HDRS_LEN (MV_MH_SIZE + MRVL_PP2_ETH_HDRS_LEN)
#define MRVL_PP2_MTU_TO_MRU(mtu) ((mtu) + MRVL_PP2_HDRS_LEN)
#define MRVL_PP2_MRU_TO_MTU(mru) ((mru) - MRVL_PP2_HDRS_LEN)
if (!hv->primary)
return -ENOMEM;
- err = hn_attach(hv, ETHER_MTU);
+ err = hn_attach(hv, RTE_ETHER_MTU);
if (err)
goto failed;
memset(&conf, 0, sizeof(conf));
conf.type = NVS_TYPE_NDIS_CONF;
- conf.mtu = mtu + ETHER_HDR_LEN;
+ conf.mtu = mtu + RTE_ETHER_HDR_LEN;
conf.caps = NVS_NDIS_CONF_VLAN;
/* enable SRIOV */
uint32_t eaddr_len;
int error;
- eaddr_len = ETHER_ADDR_LEN;
+ eaddr_len = RTE_ETHER_ADDR_LEN;
error = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0,
eaddr, eaddr_len);
if (error)
/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
- RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
+ RTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
#define DEFAULT_TX_FREE_THRESH 32U
if (unlikely(data_off + data_len > pkt->len))
goto error;
- if (unlikely(data_len < ETHER_HDR_LEN))
+ if (unlikely(data_len < RTE_ETHER_HDR_LEN))
goto error;
hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
if (!rte_is_valid_assigned_ether_addr(mac_addr))
return -EINVAL;
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
mac <<= 8;
mac |= mac_addr->addr_bytes[i] & 0xFF;
}
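Editor's note: the loop packs the six address bytes big-endian into the low 48 bits of a uint64_t, the form the netvsc filter value takes. An equivalent helper (mac_to_u64 is illustrative):

#include <stdint.h>
#include <rte_ether.h>

static uint64_t
mac_to_u64(const struct rte_ether_addr *a)
{
    uint64_t mac = 0;
    int i;

    /* byte 0 ends up in bits 47..40, byte 5 in bits 7..0 */
    for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
        mac = (mac << 8) | a->addr_bytes[i];
    return mac;
}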
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
- dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+ dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
dev_info->max_rx_pktlen = hw->max_mtu;
/* Next should change when PF support is implemented */
dev_info->max_mac_addrs = 1;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
+ if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
return -EINVAL;
/* mtu setting is forbidden if port is started */
}
/* switch to jumbo mode if needed */
- if ((uint32_t)mtu > ETHER_MAX_LEN)
+ if ((uint32_t)mtu > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
- hw->mtu = ETHER_MTU;
+ hw->mtu = RTE_ETHER_MTU;
/* VLAN insertion is incompatible with LSOv2 */
if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
rte_spinlock_init(&hw->reconfig_lock);
/* Allocating memory for mac addr */
- eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to space for MAC address");
err = -ENOMEM;
#endif
#endif
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
/* Records starting point for counters */
struct rte_eth_stats eth_stats_base;
nic->mcast_mode = bgx_port_conf.mcast_mode;
nic->speed = bgx_port_conf.mode;
- memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
+ memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0],
+ RTE_ETHER_ADDR_LEN);
octeontx_log_dbg("port opened %d", nic->port_id);
return res;
data->all_multicast = 0;
data->scattered_rx = 0;
- data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
+ data->mac_addrs = rte_zmalloc_socket(octtx_name, RTE_ETHER_ADDR_LEN, 0,
socket_id);
if (data->mac_addrs == NULL) {
octeontx_log_err("failed to allocate memory for mac_addrs");
}
/* Update port_id mac to eth_dev */
- memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
+ memcpy(data->mac_addrs, nic->mac_addr, RTE_ETHER_ADDR_LEN);
PMD_INIT_LOG(DEBUG, "ethdev info: ");
PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
uint8_t duplex;
uint8_t speed;
uint16_t mtu;
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
/* Rx port parameters */
struct {
bool classifier_enable;
#include <rte_string_fns.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
-#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
+#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
pcap_dump((u_char *)dumper, &header,
rte_pktmbuf_mtod(mbuf, void*));
} else {
- if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
+ if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
eth_pcap_gather_data(tx_pcap_data, mbuf);
pcap_dump((u_char *)dumper, &header,
tx_pcap_data);
PMD_LOG(ERR,
"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
mbuf->pkt_len,
- ETHER_MAX_JUMBO_FRAME_LEN);
+ RTE_ETHER_MAX_JUMBO_FRAME_LEN);
rte_pktmbuf_free(mbuf);
break;
rte_pktmbuf_mtod(mbuf, u_char *),
mbuf->pkt_len);
} else {
- if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
+ if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
eth_pcap_gather_data(tx_pcap_data, mbuf);
ret = pcap_sendpacket(pcap,
tx_pcap_data, mbuf->pkt_len);
PMD_LOG(ERR,
"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
mbuf->pkt_len,
- ETHER_MAX_JUMBO_FRAME_LEN);
+ RTE_ETHER_MAX_JUMBO_FRAME_LEN);
rte_pktmbuf_free(mbuf);
break;
return -1;
}
- mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
+ mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
if (!mac_addrs) {
close(if_fd);
return -1;
PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
eth_dev->data->mac_addrs = mac_addrs;
rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
- ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
close(if_fd);
ifm = (struct if_msghdr *)buf;
sdl = (struct sockaddr_dl *)(ifm + 1);
- mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
+ mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
if (!mac_addrs) {
rte_free(buf);
return -1;
PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
eth_dev->data->mac_addrs = mac_addrs;
rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
- LLADDR(sdl), ETHER_ADDR_LEN);
+ LLADDR(sdl), RTE_ETHER_ADDR_LEN);
rte_free(buf);
qede_find_first_zero_bit(bitmap, length)
#define OSAL_BUILD_BUG_ON(cond) nothing
-#define ETH_ALEN ETHER_ADDR_LEN
+#define ETH_ALEN RTE_ETHER_ADDR_LEN
+#define ETHER_TYPE_VLAN RTE_ETHER_TYPE_VLAN
+#define ETHER_TYPE_QINQ RTE_ETHER_TYPE_QINQ
#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
if (add) {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
- ETHER_ADDR_LEN) == 0) &&
+ RTE_ETHER_ADDR_LEN) == 0) &&
ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
DP_INFO(edev, "Unicast MAC is already added"
} else {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
- ETHER_ADDR_LEN) == 0) &&
+ RTE_ETHER_ADDR_LEN) == 0) &&
ucast->vlan == tmp->vlan &&
ucast->vni == tmp->vni)
break;
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
+ RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
qede_dev_info_get(dev, &dev_info);
max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
frame_size = max_rx_pkt_len;
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
- mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
QEDE_ETH_OVERHEAD);
return -EINVAL;
}
fp->rxq->rx_buf_size = rc;
}
}
- if (max_rx_pkt_len > ETHER_MAX_LEN)
+ if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
struct qed_slowpath_params params;
static bool do_once = true;
uint8_t bulletin_change;
- uint8_t vf_mac[ETHER_ADDR_LEN];
+ uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
uint8_t is_mac_forced;
bool is_mac_exist;
/* Fix up ecore debug level */
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
- (ETHER_ADDR_LEN *
+ (RTE_ETHER_ADDR_LEN *
adapter->dev_info.num_mac_filters),
RTE_CACHE_LINE_SIZE);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
SLIST_INIT(&adapter->mc_list_head);
- adapter->mtu = ETHER_MTU;
+ adapter->mtu = RTE_ETHER_MTU;
adapter->vport_started = false;
/* VF tunnel offloads is enabled by default in PF driver */
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
/* fill the common ip header */
- arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+ arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv4;
arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
arfs->tuple.ip_proto = next_proto[input->flow_type];
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+ arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv6;
arfs->tuple.ip_proto = next_proto[input->flow_type];
rte_memcpy(arfs->tuple.dst_ipv6,
&input->flow.ipv6_flow.dst_ip,
*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
switch (arfs->tuple.eth_proto) {
- case ETHER_TYPE_IPv4:
+ case RTE_ETHER_TYPE_IPv4:
ip = (struct ipv4_hdr *)raw_pkt;
ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
ip->total_length = sizeof(struct ipv4_hdr);
params->tcp = true;
}
break;
- case ETHER_TYPE_IPv6:
+ case RTE_ETHER_TYPE_IPv6:
ip6 = (struct ipv6_hdr *)raw_pkt;
ip6->proto = arfs->tuple.ip_proto;
ip6->vtc_flow =
break;
case ECORE_FILTER_MAC:
memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
break;
case ECORE_FILTER_INNER_MAC:
memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
break;
case ECORE_FILTER_MAC_VNI_PAIR:
memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
ucast->vni = conf->tenant_id;
break;
case ECORE_FILTER_INNER_MAC_VNI_PAIR:
memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
ucast->vni = conf->tenant_id;
break;
case ECORE_FILTER_INNER_PAIR:
memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
ucast->vlan = conf->inner_vlan;
break;
default:
spec = pattern->spec;
flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
- flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+ flow->entry.tuple.eth_proto =
+ RTE_ETHER_TYPE_IPv4;
}
break;
rte_memcpy(flow->entry.tuple.dst_ipv6,
spec->hdr.dst_addr,
IPV6_ADDR_LEN);
- flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+ flow->entry.tuple.eth_proto =
+ RTE_ETHER_TYPE_IPv6;
}
break;
struct qed_dev_info {
uint8_t num_hwfns;
- uint8_t hw_mac[ETHER_ADDR_LEN];
+ uint8_t hw_mac[RTE_ETHER_ADDR_LEN];
bool is_mf_default;
/* FW version */
dev_info->dev_type = edev->type;
rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_minor = FW_MINOR_VERSION;
max_vf_vlan_filters;
rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
} else {
ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
&info->num_queues);
qed_fill_dev_info(edev, &info->common);
if (IS_VF(edev))
- memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);
+ memset(&info->common.hw_mac, 0, RTE_ETHER_ADDR_LEN);
return 0;
}
ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
/* Note: Valid only if VLAN stripping is disabled */
- if (ethertype == ETHER_TYPE_VLAN) {
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
vlan_tagged = 1;
vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
len += sizeof(struct rte_vlan_hdr);
ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
}
- if (ethertype == ETHER_TYPE_IPv4) {
+ if (ethertype == RTE_ETHER_TYPE_IPv4) {
packet_type |= RTE_PTYPE_L3_IPV4;
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
packet_type |= RTE_PTYPE_L4_UDP;
- } else if (ethertype == ETHER_TYPE_IPv6) {
+ } else if (ethertype == RTE_ETHER_TYPE_IPv6) {
packet_type |= RTE_PTYPE_L3_IPV6;
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
if (ipv6_hdr->proto == IPPROTO_TCP)
#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \
+ (QEDE_LLC_SNAP_HDR_LEN) + 2)
-#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
+#define QEDE_MAX_ETHER_HDR_LEN (RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
sfc_log_init(sa, "entry");
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = EFX_MAC_SDU_MAX;
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
* The driver does not use it, but other PMDs update jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
- if (mtu > ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MAX_LEN) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
sfc_log_init(sa, "entry");
- dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
if (dev->data->mac_addrs == NULL) {
rc = ENOMEM;
goto fail_mac_addrs;
return 0;
if (mask->protocol == supp_mask.protocol) {
- if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"GENEVE encap. protocol must be Ethernet "
{
int i;
char *end;
- unsigned long o[ETHER_ADDR_LEN];
+ unsigned long o[RTE_ETHER_ADDR_LEN];
static struct rte_ether_addr ether_addr;
i = 0;
return NULL;
/* Support the format XX:XX:XX:XX:XX:XX */
- if (i == ETHER_ADDR_LEN) {
+ if (i == RTE_ETHER_ADDR_LEN) {
while (i-- != 0) {
if (o[i] > UINT8_MAX)
return NULL;
ether_addr.addr_bytes[i] = (uint8_t)o[i];
}
/* Support the format XXXX:XXXX:XXXX */
- } else if (i == ETHER_ADDR_LEN / 2) {
+ } else if (i == RTE_ETHER_ADDR_LEN / 2) {
while (i-- != 0) {
if (o[i] > UINT16_MAX)
return NULL;
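/* Editor's note (illustrative continuation): the parser accepts six
 * colon-separated bytes or three colon-separated 16-bit words; in the
 * word form each o[i] supplies two address bytes, roughly as below.
 * This reuses the function's o[] and ether_addr locals, and the byte
 * order shown is a sketch assumption, not quoted from the source. */
ether_addr.addr_bytes[2 * i] = (uint8_t)(o[i] >> 8);
ether_addr.addr_bytes[2 * i + 1] = (uint8_t)(o[i] & 0xff);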
char *buff_data = rte_pktmbuf_mtod(seg, void *);
proto = (*buff_data & 0xf0);
pi.proto = (proto == 0x40) ?
- rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) :
((proto == 0x60) ?
- rte_cpu_to_be_16(ETHER_TYPE_IPv6) :
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) :
0x00);
}
return 0;
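Editor's note: a tun device hands the PMD raw L3 packets, so the ethertype for the packet-info header is inferred from the IP version nibble: 0x4x means IPv4, 0x6x IPv6, anything else 0. As a standalone helper (l3_version_to_ethertype is illustrative):

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ether.h>

static uint16_t
l3_version_to_ethertype(uint8_t first_byte)
{
    switch (first_byte & 0xf0) {
    case 0x40:
        return rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
    case 0x60:
        return rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
    default:
        return 0x00;    /* unknown version nibble */
    }
}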
struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
- max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
+ max_size = *txq->mtu + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4);
for (i = 0; i < nb_pkts; i++) {
struct rte_mbuf *mbuf_in = bufs[num_tx];
struct rte_mbuf **mbuf;
/* TCP segmentation implies TCP checksum offload */
mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
- /* gso size is calculated without ETHER_CRC_LEN */
+ /* gso size is calculated without RTE_ETHER_CRC_LEN */
hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
mbuf_in->l4_len;
tso_segsz = mbuf_in->tso_segsz + hdrs_len;
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
+ dev_info->max_rx_pktlen = (uint32_t)RTE_ETHER_MAX_VLAN_FRAME_LEN;
dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->min_rx_bufsize = 0;
mac_addr))
mode = LOCAL_AND_REMOTE;
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
- rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN);
ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
if (ret < 0)
return ret;
- rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN);
if (pmd->remote_if_index && !pmd->flow_isolate) {
/* Replace MAC redirection rule after a MAC change */
ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
memset(&ifr, 0, sizeof(struct ifreq));
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
goto error_exit;
}
goto error_remote;
}
rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
TAP_LOG(ERR, "%s: failed to get %s MAC address.",
static int iface_idx;
/* fixed mac = 00:64:74:61:70:<iface_idx> */
- memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
- user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
+ memcpy((char *)user_mac->addr_bytes, "\0dtap",
+ RTE_ETHER_ADDR_LEN);
+ user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+ iface_idx++ + '0';
goto success;
}
return 0;
msg = &flow->msg;
if (!rte_is_zero_ether_addr(&mask->dst)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
+ RTE_ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
tap_nlattr_add(&msg->nh,
- TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+ TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,
&mask->dst.addr_bytes);
}
if (!rte_is_zero_ether_addr(&mask->src)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
- &spec->src.addr_bytes);
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
+ RTE_ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
tap_nlattr_add(&msg->nh,
- TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+ TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,
&mask->src.addr_bytes);
}
return 0;
/* Constants */
#include <rte_ether.h>
-#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+#define NICVF_MAC_ADDR_SIZE RTE_ETHER_ADDR_LEN
/* Ethernet */
-#define ether_addr_copy(x, y) memcpy(y, x, ETHER_ADDR_LEN)
+#define ether_addr_copy(x, y) memcpy(y, x, RTE_ETHER_ADDR_LEN)
#include <rte_io.h>
#define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr))
(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
return -EINVAL;
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
return -EINVAL;
/* Update max_rx_pkt_len */
- rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
+ rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
- dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
+ dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
dev_info->max_rx_queues =
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_tx_queues =
/* Setup MTU based on max_rx_pkt_len or default */
mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- - ETHER_HDR_LEN : ETHER_MTU;
+ - RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
return ENOTSUP;
}
- eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
ret = -ENOMEM;
uint16_t mtu;
int skip_bytes;
bool vlan_filter_en;
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
/* secondary queue set support */
uint8_t sqs_id;
uint8_t sqs_count;
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct virtio_hw *hw = dev->data->dev_private;
- uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
hw->vtnet_hdr_size;
uint32_t frame_size = mtu + ether_hdr_len;
uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
- if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
- ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
+ RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
return -EINVAL;
}
return 0;
{
vtpci_write_dev_config(hw,
offsetof(struct virtio_net_config, mac),
- &hw->mac_addr, ETHER_ADDR_LEN);
+ &hw->mac_addr, RTE_ETHER_ADDR_LEN);
}
static void
if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, mac),
- &hw->mac_addr, ETHER_ADDR_LEN);
+ &hw->mac_addr, RTE_ETHER_ADDR_LEN);
} else {
rte_eth_random_addr(&hw->mac_addr[0]);
virtio_set_hwaddr(hw);
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
- len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
+ len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
memcpy(ctrl.data, uc, len[0]);
- len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
+ len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
memcpy(ctrl.data + len[0], mc, len[1]);
err = virtio_send_command(hw->cvq, &ctrl, len, 2);
return -EINVAL;
}
- uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+ uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
+ sizeof(uc->entries));
uc->entries = 0;
- mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+ mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
+ sizeof(mc->entries));
mc->entries = 0;
for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
struct virtio_net_ctrl_mac *tbl
= rte_is_multicast_ether_addr(addr) ? mc : uc;
- memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
+ memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
}
return virtio_mac_table_set(hw, uc, mc);
return;
}
- uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+ uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
+ sizeof(uc->entries));
uc->entries = 0;
- mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+ mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
+ sizeof(mc->entries));
mc->entries = 0;
for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
continue;
tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
- memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
+ memcpy(&tbl->macs[tbl->entries++], addrs + i,
+ RTE_ETHER_ADDR_LEN);
}
virtio_mac_table_set(hw, uc, mc);
{
struct virtio_hw *hw = dev->data->dev_private;
- memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
+ memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
/* Use atomic update if available */
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
struct virtio_pmd_ctrl ctrl;
- int len = ETHER_ADDR_LEN;
+ int len = RTE_ETHER_ADDR_LEN;
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
- memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
+ memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}
offsetof(struct virtio_net_config, mtu),
&config.mtu, sizeof(config.mtu));
- if (config.mtu < ETHER_MIN_MTU)
+ if (config.mtu < RTE_ETHER_MIN_MTU)
req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
}
* time, but check again in case it has changed since
* then, which should not happen.
*/
- if (config->mtu < ETHER_MIN_MTU) {
+ if (config->mtu < RTE_ETHER_MIN_MTU) {
PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
config->mtu);
return -1;
eth_dev->data->mtu = config->mtu;
} else {
- hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
VLAN_TAG_LEN - hw->vtnet_hdr_size;
}
} else {
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
hw->max_queue_pairs = 1;
- hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
VLAN_TAG_LEN - hw->vtnet_hdr_size;
}
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("virtio",
+ VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
- VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
+ VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
- uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
hw->vtnet_hdr_size;
uint64_t rx_offloads = rxmode->offloads;
uint64_t tx_offloads = txmode->offloads;
bool has_tx_offload;
bool has_rx_offload;
uint16_t port_id;
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
uint8_t *isr;
uint16_t *notify_base;
*/
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
- uint8_t mac[ETHER_ADDR_LEN];
+ uint8_t mac[RTE_ETHER_ADDR_LEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
uint16_t status;
uint16_t max_virtqueue_pairs;
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf_inorder(vq, rxm);
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
- memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);
+ memcpy(ifr.ifr_hwaddr.sa_data, mac, RTE_ETHER_ADDR_LEN);
if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {
PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno));
goto error;
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
int i, r;
- uint32_t tmp[ETHER_ADDR_LEN];
+ uint32_t tmp[RTE_ETHER_ADDR_LEN];
if (!mac)
return;
r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
- if (r == ETHER_ADDR_LEN) {
- for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ if (r == RTE_ETHER_ADDR_LEN) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = (uint8_t)tmp[i];
dev->mac_specified = 1;
} else {
uint64_t unsupported_features; /* unsupported features mask */
uint8_t status;
uint16_t port_id;
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
char path[PATH_MAX];
union {
struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (offset == offsetof(struct virtio_net_config, mac) &&
- length == ETHER_ADDR_LEN) {
- for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ length == RTE_ETHER_ADDR_LEN) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
((uint8_t *)dst)[i] = dev->mac_addr[i];
return;
}
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if ((offset == offsetof(struct virtio_net_config, mac)) &&
- (length == ETHER_ADDR_LEN))
- for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ (length == RTE_ETHER_ADDR_LEN))
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = ((const uint8_t *)src)[i];
else
PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
*/
struct virtio_net_ctrl_mac {
uint32_t entries;
- uint8_t macs[][ETHER_ADDR_LEN];
+ uint8_t macs[][RTE_ETHER_ADDR_LEN];
} __attribute__((__packed__));
#define VIRTIO_NET_CTRL_MAC 1
memcpy(hw->perm_addr + 4, &mac_hi, 2);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
VMXNET3_MAX_MAC_ADDRS, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
+ RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
return -ENOMEM;
}
/* Copy the permanent MAC address */
uint16_t subsystem_vendor_id;
bool adapter_stopped;
- uint8_t perm_addr[ETHER_ADDR_LEN];
+ uint8_t perm_addr[RTE_ETHER_ADDR_LEN];
uint8_t num_tx_queues;
uint8_t num_rx_queues;
uint8_t bufs_per_pkt;
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {
{
size_t vlan_offset = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
struct rte_vlan_hdr *vlan_hdr =
(struct rte_vlan_hdr *)(eth_hdr + 1);
vlan_offset = sizeof(struct rte_vlan_hdr);
*proto = vlan_hdr->eth_proto;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
vlan_hdr = vlan_hdr + 1;
*proto = vlan_hdr->eth_proto;
eth_hdr = rte_pktmbuf_mtod(pkts[i],
struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
printf("VLAN taged frame, offset:");
offset = get_vlan_offset(eth_hdr, ðer_type);
if (offset > 0)
printf("%d\n", offset);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
global_flag_stru_p->port_packets[1]++;
rte_spinlock_unlock(&global_flag_stru_p->lock);
rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
}
}
- } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
global_flag_stru_p->port_packets[2]++;
rte_spinlock_unlock(&global_flag_stru_p->lock);
eth_hdr = rte_pktmbuf_mtod(created_pkt, struct rte_ether_hdr *);
rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
- memset(ð_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
+ memset(ð_hdr->d_addr, 0xFF, RTE_ETHER_ADDR_LEN);
+ eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);
arp_hdr = (struct rte_arp_hdr *)(
(char *)eth_hdr + sizeof(struct rte_ether_hdr));
arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
- arp_hdr->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- arp_hdr->arp_hlen = ETHER_ADDR_LEN;
+ arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
+ arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
arp_hdr->arp_plen = sizeof(uint32_t);
arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REQUEST);
rte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);
arp_hdr->arp_data.arp_sip = bond_ip;
- memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
+ memset(&arp_hdr->arp_data.arp_tha, 0, RTE_ETHER_ADDR_LEN);
arp_hdr->arp_data.arp_tip =
((unsigned char *)&res->ip.addr.ipv4)[0] |
(((unsigned char *)&res->ip.addr.ipv4)[1] << 8) |
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
new_mtu = strtoul(params->opt, &ptr_parse_end, 10);
if (*ptr_parse_end != '\0' ||
- new_mtu < ETHER_MIN_MTU ||
- new_mtu > ETHER_MAX_JUMBO_FRAME_LEN) {
+ new_mtu < RTE_ETHER_MIN_MTU ||
+ new_mtu > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
printf("Port %i: Invalid MTU value\n", params->port);
return;
}
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.rx_adv_conf = {
.rss_conf = {
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
static inline void
print_ether_addr(const char *what, struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", what, buf);
}
* Default byte size for the IPv4/IPv6 Maximum Transfer Unit (MTU).
* These values include the size of the respective IP header.
*/
-#define IPV4_MTU_DEFAULT ETHER_MTU
-#define IPV6_MTU_DEFAULT ETHER_MTU
+#define IPV4_MTU_DEFAULT RTE_ETHER_MTU
+#define IPV6_MTU_DEFAULT RTE_ETHER_MTU
/*
* The overhead from max frame size to MTU.
* We have to consider the max possible overhead.
*/
#define MTU_OVERHEAD \
- (ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * sizeof(struct rte_vlan_hdr))
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ 2 * sizeof(struct rte_vlan_hdr))
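
With the values from rte_ether.h, the worst-case overhead is 14 (header) + 4 (CRC) + 2 * 4 (two stacked VLAN tags) = 26 bytes, so a 1500-byte MTU corresponds to a 1526-byte frame. A compile-time spot check (illustrative sketch, assuming C11 and the 4-byte struct rte_vlan_hdr):

    _Static_assert(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
                   2 * sizeof(struct rte_vlan_hdr) == 26,
                   "worst-case frame overhead");
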
/*
* Default payload in bytes for the IPv6 packet.
rte_ether_addr_copy(&ports_eth_addr[port_out],
ð_hdr->s_addr);
if (ipv6)
- eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);
+ eth_hdr->ether_type =
+ rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv6);
else
- eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
+ eth_hdr->ether_type =
+ rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);
}
len += len2;
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
m->packet_type = packet_type;
if (!rte_eth_dev_is_valid_port(port_id))
return -EINVAL;
- if (new_mtu > ETHER_MAX_LEN)
+ if (new_mtu > RTE_ETHER_MAX_LEN)
return -EINVAL;
/* Set new MTU */
{
int i;
char *end;
- unsigned long o[ETHER_ADDR_LEN];
+ unsigned long o[RTE_ETHER_ADDR_LEN];
static struct rte_ether_addr ether_addr;
i = 0;
return NULL;
/* Support the format XX:XX:XX:XX:XX:XX */
- if (i == ETHER_ADDR_LEN) {
+ if (i == RTE_ETHER_ADDR_LEN) {
while (i-- != 0) {
if (o[i] > UINT8_MAX)
return NULL;
ether_addr.addr_bytes[i] = (uint8_t)o[i];
}
/* Support the format XXXX:XXXX:XXXX */
- } else if (i == ETHER_ADDR_LEN / 2) {
+ } else if (i == RTE_ETHER_ADDR_LEN / 2) {
while (i-- != 0) {
if (o[i] > UINT16_MAX)
return NULL;
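
The parser above accepts either six colon-separated bytes or three colon-separated 16-bit words. A self-contained sketch of the same rules (sketch_parse_mac is a hypothetical helper, not the function in the hunk; the truncated three-word branch is assumed to split each word big-endian, high byte first):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int
    sketch_parse_mac(const char *s, uint8_t out[6])
    {
        unsigned long o[6];
        char *end;
        int i = 0;

        do {
            errno = 0;
            o[i] = strtoul(s, &end, 16);    /* one hex group per pass */
            if (errno != 0 || end == s)
                return -1;
            s = end + 1;
        } while (++i < 6 && *end == ':');

        if (*end != '\0')
            return -1;
        if (i == 6) {                       /* XX:XX:XX:XX:XX:XX */
            while (i-- != 0) {
                if (o[i] > UINT8_MAX)
                    return -1;
                out[i] = (uint8_t)o[i];
            }
        } else if (i == 3) {                /* XXXX:XXXX:XXXX */
            while (i-- != 0) {
                if (o[i] > UINT16_MAX)
                    return -1;
                out[i * 2] = (uint8_t)(o[i] >> 8);
                out[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
            }
        } else {
            return -1;
        }
        return 0;
    }

Both "01:23:45:67:89:ab" and "0123:4567:89ab" then yield the same six bytes.
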
dst_port = next_hop;
}
- eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
+ eth_hdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* if packet is IPv6 */
struct ipv6_extension_fragment *frag_hdr;
dst_port = next_hop;
}
- eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);
+ eth_hdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv6);
}
/* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
struct rte_ether_hdr *eth;
eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
- nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
if (*nlp == IPPROTO_ESP)
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
}
pkt->l2_len = 0;
pkt->l3_len = sizeof(struct ip);
- } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
- nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
if (*nlp == IPPROTO_ESP)
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
ip = rte_pktmbuf_mtod(pkt, struct ip *);
ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
if (ip->ip_v == IPVERSION) {
pkt->ol_flags |= qconf->outbound.ipv4_offloads;
pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = ETHER_HDR_LEN;
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
ip->ip_sum = 0;
if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
ip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
} else {
pkt->ol_flags |= qconf->outbound.ipv6_offloads;
pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = ETHER_HDR_LEN;
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
}
memcpy(ðhdr->s_addr, ðaddr_tbl[port].src,
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
rte_ether_addr_copy(dest_addr, ðdr->d_addr);
rte_ether_addr_copy(&ports_eth_addr[port], ðdr->s_addr);
- ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
+ ethdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);
/* Put new packet into the output queue */
len = qconf->tx_mbufs[port].len;
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
memcpy(&conf, &port_conf, sizeof(conf));
/* Set new MTU */
- if (new_mtu > ETHER_MAX_LEN)
+ if (new_mtu > RTE_ETHER_MAX_LEN)
conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
static void
print_ethaddr(const char *name, struct rte_ether_addr *mac_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, mac_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
RTE_LOG(INFO, APP, "\t%s%s\n", name, buf);
}
#define BURST_SIZE 32
static const struct rte_eth_conf port_conf_default = {
- .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+ .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
};
/* l2fwd-cat.c: CAT enabled, basic DPDK skeleton forwarding example. */
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- if (eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (eth_hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
return -1;
ipdata_offset = sizeof(struct rte_ether_hdr);
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
/*
* if no max-pkt-len set, then use the
- * default value ETHER_MAX_LEN
+ * default value RTE_ETHER_MAX_LEN
*/
if (0 == getopt_long(argc, argvopt, "",
&lenopts, &option_index)) {
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
m->packet_type = packet_type;
/**
* if no max-pkt-len set, use the default value
- * ETHER_MAX_LEN
+ * RTE_ETHER_MAX_LEN
*/
if (0 == getopt_long(argc, argvopt, "",
&lenopts, &option_index)) {
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
l3 = (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
ipv4_hdr = (struct ipv4_hdr *)l3;
hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
IPV4_IHL_MULTIPLIER;
packet_type |= RTE_PTYPE_L4_UDP;
} else
packet_type |= RTE_PTYPE_L3_IPV4_EXT;
- } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
ipv6_hdr = (struct ipv6_hdr *)l3;
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
m->packet_type = packet_type;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
/*
* if no max-pkt-len set, use the default
- * value ETHER_MAX_LEN.
+ * value RTE_ETHER_MAX_LEN.
*/
if (getopt_long(argc, argvopt, "",
&lenopts, &option_index) == 0) {
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
dest_eth_addr[portid] =
- ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
+ RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
m->packet_type = packet_type;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MULTI_SEGS;
- /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
+ /* if no max-pkt-len set, use the default value
+ * RTE_ETHER_MAX_LEN
+ */
if (0 == getopt_long(argc, argvopt, "", &lenopts,
&option_index)) {
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- dest_eth_addr[portid] = ETHER_LOCAL_ADMIN_ADDR +
+ dest_eth_addr[portid] = RTE_ETHER_LOCAL_ADMIN_ADDR +
((uint64_t)portid << 40);
*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
static struct rte_eth_conf port_conf = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
static uint16_t
get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
{
- if (ethertype == ETHER_TYPE_IPv4)
+ if (ethertype == RTE_ETHER_TYPE_IPv4)
return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
+ else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
info->outer_l2_len = sizeof(struct rte_ether_hdr);
ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
- if (ethertype == ETHER_TYPE_VLAN) {
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
struct rte_vlan_hdr *vlan_hdr =
(struct rte_vlan_hdr *)(eth_hdr + 1);
info->outer_l2_len += sizeof(struct rte_vlan_hdr);
}
switch (ethertype) {
- case ETHER_TYPE_IPv4:
+ case RTE_ETHER_TYPE_IPv4:
ipv4_hdr = (struct ipv4_hdr *)
((char *)eth_hdr + info->outer_l2_len);
info->outer_l3_len = sizeof(struct ipv4_hdr);
*l4_proto = ipv4_hdr->next_proto_id;
break;
- case ETHER_TYPE_IPv6:
+ case RTE_ETHER_TYPE_IPv6:
ipv6_hdr = (struct ipv6_hdr *)
((char *)eth_hdr + info->outer_l2_len);
info->outer_l3_len = sizeof(struct ipv6_hdr);
info->l2_len = sizeof(struct rte_ether_hdr);
ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
- if (ethertype == ETHER_TYPE_VLAN) {
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
struct rte_vlan_hdr *vlan_hdr =
(struct rte_vlan_hdr *)(eth_hdr + 1);
info->l2_len += sizeof(struct rte_vlan_hdr);
l3_hdr = (char *)eth_hdr + info->l2_len;
- if (ethertype == ETHER_TYPE_IPv4) {
+ if (ethertype == RTE_ETHER_TYPE_IPv4) {
ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_IPV4;
ol_flags |= PKT_TX_IP_CKSUM;
info->l3_len = sizeof(struct ipv4_hdr);
l4_proto = ipv4_hdr->next_proto_id;
- } else if (ethertype == ETHER_TYPE_IPv6) {
+ } else if (ethertype == RTE_ETHER_TYPE_IPv6) {
ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
info->l3_len = sizeof(struct ipv6_hdr);
l4_proto = ipv6_hdr->proto;
m->l2_len = tx_offload.l2_len;
m->l3_len = tx_offload.l3_len;
m->l4_len = tx_offload.l4_len;
- m->l2_len += ETHER_VXLAN_HLEN;
+ m->l2_len += RTE_ETHER_VXLAN_HLEN;
}
m->outer_l2_len = sizeof(struct rte_ether_hdr);
+ sizeof(struct rte_vxlan_hdr));
udp->dst_port = rte_cpu_to_be_16(vxdev.dst_port);
- hash = rte_hash_crc(phdr, 2 * ETHER_ADDR_LEN, phdr->ether_type);
+ hash = rte_hash_crc(phdr, 2 * RTE_ETHER_ADDR_LEN, phdr->ether_type);
udp->src_port = rte_cpu_to_be_16((((uint64_t) hash * PORT_RANGE) >> 32)
+ PORT_MIN);
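
The source-port line above maps the 32-bit CRC hash of the inner MAC pair into the configured port window with a multiply-shift: for any 32-bit h, ((uint64_t)h * N) >> 32 is guaranteed to fall in [0, N), so the result lands in [PORT_MIN, PORT_MIN + PORT_RANGE). Hedged sketch (PORT_MIN and PORT_RANGE are this example app's constants, defined outside the excerpt):

    uint32_t h = 0xdeadbeefu;   /* any 32-bit hash value */
    uint16_t sport = (uint16_t)((((uint64_t)h * PORT_RANGE) >> 32) + PORT_MIN);
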
return -1;
}
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
vdev->mac_address.addr_bytes[i] =
vxdev.port[portid].vport_mac.addr_bytes[i] =
pkt_hdr->s_addr.addr_bytes[i];
&app_l2_hdr[portid].d_addr);
rte_ether_addr_copy(&ports_eth_addr[0],
&app_l2_hdr[portid].s_addr);
- app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip = &app_ip_hdr[portid];
ip->version_ihl = IP_VHL_DEF;
vdev->rx_q);
return;
}
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
vdev->mac_address.addr_bytes[i] = 0;
/* Clear out the receive buffers */
return -1;
}
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
/* vlan_tag currently uses the device_id. */
{
if (ol_flags & PKT_TX_IPV4)
return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
+ else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
tx_q = &lcore_tx_queue[lcore_id];
nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
+ if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
/* Guest has inserted the vlan tag. */
struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
{
int i;
char *end;
- unsigned long o[ETHER_ADDR_LEN];
+ unsigned long o[RTE_ETHER_ADDR_LEN];
i = 0;
do {
return -1;
/* Support the format XX:XX:XX:XX:XX:XX */
- if (i == ETHER_ADDR_LEN) {
+ if (i == RTE_ETHER_ADDR_LEN) {
while (i-- != 0) {
if (o[i] > UINT8_MAX)
return -1;
ether_addr->addr_bytes[i] = (uint8_t)o[i];
}
/* Support the format XXXX:XXXX:XXXX */
- } else if (i == ETHER_ADDR_LEN / 2) {
+ } else if (i == RTE_ETHER_ADDR_LEN / 2) {
while (i-- != 0) {
if (o[i] > UINT16_MAX)
return -1;
/****************/
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
eth_dev = eth_dev_get(port_id);
strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
eth_dev->data->port_id = port_id;
- eth_dev->data->mtu = ETHER_MTU;
+ eth_dev->data->mtu = RTE_ETHER_MTU;
unlock:
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
dev_info.max_rx_pktlen);
ret = -EINVAL;
goto rollback;
- } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
port_id, dev_conf->rxmode.max_rx_pkt_len,
- (unsigned)ETHER_MIN_LEN);
+ (unsigned int)RTE_ETHER_MIN_LEN);
ret = -EINVAL;
goto rollback;
}
} else {
- if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
- dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
+ dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
/* Use default value */
dev->data->dev_conf.rxmode.max_rx_pkt_len =
- ETHER_MAX_LEN;
+ RTE_ETHER_MAX_LEN;
}
/* Any requested offloading must be within its device capabilities */
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
dev_info->device = dev->device;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = UINT16_MAX;
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
rte_eth_dev_info_get(port_id, &dev_info);
for (i = 0; i < dev_info.max_mac_addrs; i++)
- if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+ if (memcmp(addr, &dev->data->mac_addrs[i],
+ RTE_ETHER_ADDR_LEN) == 0)
return i;
return -1;
for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
if (memcmp(addr, &dev->data->hash_mac_addrs[i],
- ETHER_ADDR_LEN) == 0)
+ RTE_ETHER_ADDR_LEN) == 0)
return i;
return -1;
* };
*
* device = dev->device
- * min_mtu = ETHER_MIN_MTU
+ * min_mtu = RTE_ETHER_MIN_MTU
* max_mtu = UINT16_MAX
*
* The following fields will be populated if support for dev_infos_get()
* Matches an 802.1Q/ad VLAN tag.
*
* The corresponding standard outer EtherType (TPID) values are
- * ETHER_TYPE_VLAN or ETHER_TYPE_QINQ. It can be overridden by the preceding
- * pattern item.
+ * RTE_ETHER_TYPE_VLAN or RTE_ETHER_TYPE_QINQ. It can be overridden by
+ * the preceding pattern item.
*/
struct rte_flow_item_vlan {
rte_be16_t tci; /**< Tag control information. */
* Matches a E-tag header.
*
* The corresponding standard outer EtherType (TPID) value is
- * ETHER_TYPE_ETAG. It can be overridden by the preceding pattern item.
+ * RTE_ETHER_TYPE_ETAG. It can be overridden by the preceding pattern item.
*/
struct rte_flow_item_e_tag {
/**
* Set MAC address from the matched flow
*/
struct rte_flow_action_set_mac {
- uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
};
/*
*ipv6_hdr = NULL;
switch (eth_hdr->ether_type) {
- case RTE_BE16(ETHER_TYPE_IPv4):
+ case RTE_BE16(RTE_ETHER_TYPE_IPv4):
*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
break;
- case RTE_BE16(ETHER_TYPE_IPv6):
+ case RTE_BE16(RTE_ETHER_TYPE_IPv6):
*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
break;
- case RTE_BE16(ETHER_TYPE_VLAN):
+ case RTE_BE16(RTE_ETHER_TYPE_VLAN):
vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
switch (vlan_hdr->eth_proto) {
- case RTE_BE16(ETHER_TYPE_IPv4):
+ case RTE_BE16(RTE_ETHER_TYPE_IPv4):
*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
break;
- case RTE_BE16(ETHER_TYPE_IPv6):
+ case RTE_BE16(RTE_ETHER_TYPE_IPv6):
*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
break;
default:
dev_info.mbuf_size = conf->mbuf_size;
dev_info.mtu = conf->mtu;
- memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
+ memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);
strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);
__extension__
uint8_t force_bind : 1; /* Flag to bind kernel thread */
- uint8_t mac_addr[ETHER_ADDR_LEN]; /* MAC address assigned to KNI */
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; /* MAC address assigned to KNI */
uint16_t mtu;
};
}
/* Ethernet header. */
- memset(eth_hdr->d_addr.addr_bytes, 0xff, ETHER_ADDR_LEN);
+ memset(eth_hdr->d_addr.addr_bytes, 0xff, RTE_ETHER_ADDR_LEN);
rte_ether_addr_copy(mac, ð_hdr->s_addr);
- eth_hdr->ether_type = htons(ETHER_TYPE_RARP);
+ eth_hdr->ether_type = htons(RTE_ETHER_TYPE_RARP);
/* RARP header. */
rarp = (struct rte_arp_hdr *)(eth_hdr + 1);
rarp->arp_hardware = htons(RTE_ARP_HRD_ETHER);
- rarp->arp_protocol = htons(ETHER_TYPE_IPv4);
- rarp->arp_hlen = ETHER_ADDR_LEN;
+ rarp->arp_protocol = htons(RTE_ETHER_TYPE_IPv4);
+ rarp->arp_hlen = RTE_ETHER_ADDR_LEN;
rarp->arp_plen = 4;
rarp->arp_opcode = htons(RTE_ARP_OP_REVREQUEST);
#include <rte_mbuf.h>
#include <rte_byteorder.h>
-#define ETHER_ADDR_LEN 6 /**< Length of Ethernet address. */
-#define ETHER_TYPE_LEN 2 /**< Length of Ethernet type field. */
-#define ETHER_CRC_LEN 4 /**< Length of Ethernet CRC. */
-#define ETHER_HDR_LEN \
- (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) /**< Length of Ethernet header. */
-#define ETHER_MIN_LEN 64 /**< Minimum frame len, including CRC. */
-#define ETHER_MAX_LEN 1518 /**< Maximum frame len, including CRC. */
-#define ETHER_MTU \
- (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN) /**< Ethernet MTU. */
-
-#define ETHER_MAX_VLAN_FRAME_LEN \
- (ETHER_MAX_LEN + 4) /**< Maximum VLAN frame length, including CRC. */
-
-#define ETHER_MAX_JUMBO_FRAME_LEN \
+#define RTE_ETHER_ADDR_LEN 6 /**< Length of Ethernet address. */
+#define RTE_ETHER_TYPE_LEN 2 /**< Length of Ethernet type field. */
+#define RTE_ETHER_CRC_LEN 4 /**< Length of Ethernet CRC. */
+#define RTE_ETHER_HDR_LEN \
+ (RTE_ETHER_ADDR_LEN * 2 + \
+ RTE_ETHER_TYPE_LEN) /**< Length of Ethernet header. */
+#define RTE_ETHER_MIN_LEN 64 /**< Minimum frame len, including CRC. */
+#define RTE_ETHER_MAX_LEN 1518 /**< Maximum frame len, including CRC. */
+#define RTE_ETHER_MTU \
+ (RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN - \
+ RTE_ETHER_CRC_LEN) /**< Ethernet MTU. */
+
+#define RTE_ETHER_MAX_VLAN_FRAME_LEN \
+ (RTE_ETHER_MAX_LEN + 4)
+ /**< Maximum VLAN frame length, including CRC. */
+
+#define RTE_ETHER_MAX_JUMBO_FRAME_LEN \
0x3F00 /**< Maximum Jumbo frame length, including CRC. */
-#define ETHER_MAX_VLAN_ID 4095 /**< Maximum VLAN ID. */
+#define RTE_ETHER_MAX_VLAN_ID 4095 /**< Maximum VLAN ID. */
-#define ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
+#define RTE_ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
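
The renames leave every value untouched; in particular the classic MTU still derives as 1518 - 14 - 4 = 1500. Illustrative compile-time checks (assuming C11):

    _Static_assert(RTE_ETHER_HDR_LEN == 14, "dst + src + EtherType");
    _Static_assert(RTE_ETHER_MTU == 1500, "classic Ethernet MTU");
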
/**
* Ethernet address:
* See http://standards.ieee.org/regauth/groupmac/tutorial.html
*/
struct rte_ether_addr {
- uint8_t addr_bytes[ETHER_ADDR_LEN]; /**< Addr bytes in tx order */
+ uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; /**< Addr bytes in tx order */
} __attribute__((__packed__));
-#define ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */
-#define ETHER_GROUP_ADDR 0x01 /**< Multicast or broadcast Eth. address. */
+#define RTE_ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */
+#define RTE_ETHER_GROUP_ADDR 0x01 /**< Multicast or broadcast Eth. address. */
/**
* Check if two Ethernet addresses are the same.
const struct rte_ether_addr *ea2)
{
int i;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
if (ea1->addr_bytes[i] != ea2->addr_bytes[i])
return 0;
return 1;
static inline int rte_is_zero_ether_addr(const struct rte_ether_addr *ea)
{
int i;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
if (ea->addr_bytes[i] != 0x00)
return 0;
return 1;
*/
static inline int rte_is_unicast_ether_addr(const struct rte_ether_addr *ea)
{
- return (ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0;
+ return (ea->addr_bytes[0] & RTE_ETHER_GROUP_ADDR) == 0;
}
/**
*/
static inline int rte_is_multicast_ether_addr(const struct rte_ether_addr *ea)
{
- return ea->addr_bytes[0] & ETHER_GROUP_ADDR;
+ return ea->addr_bytes[0] & RTE_ETHER_GROUP_ADDR;
}
/**
*/
static inline int rte_is_universal_ether_addr(const struct rte_ether_addr *ea)
{
- return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0;
+ return (ea->addr_bytes[0] & RTE_ETHER_LOCAL_ADMIN_ADDR) == 0;
}
/**
*/
static inline int rte_is_local_admin_ether_addr(const struct rte_ether_addr *ea)
{
- return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0;
+ return (ea->addr_bytes[0] & RTE_ETHER_LOCAL_ADMIN_ADDR) != 0;
}
/**
uint64_t rand = rte_rand();
uint8_t *p = (uint8_t *)&rand;
- rte_memcpy(addr, p, ETHER_ADDR_LEN);
- addr[0] &= (uint8_t)~ETHER_GROUP_ADDR; /* clear multicast bit */
- addr[0] |= ETHER_LOCAL_ADMIN_ADDR; /* set local assignment bit */
+ rte_memcpy(addr, p, RTE_ETHER_ADDR_LEN);
+ addr[0] &= (uint8_t)~RTE_ETHER_GROUP_ADDR; /* clear multicast bit */
+ addr[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; /* set local assignment bit */
}
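
The two bit operations above make the generated address immediately usable: clearing RTE_ETHER_GROUP_ADDR (bit 0 of the first octet) yields a unicast address, and setting RTE_ETHER_LOCAL_ADMIN_ADDR (bit 1) marks it locally administered so it cannot collide with a vendor-assigned OUI. Hedged usage sketch (assert.h assumed):

    struct rte_ether_addr a;

    rte_eth_random_addr(a.addr_bytes);
    assert(rte_is_unicast_ether_addr(&a));      /* group bit cleared */
    assert(rte_is_local_admin_ether_addr(&a));  /* local bit set */
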
/**
#endif
}
-#define ETHER_ADDR_FMT_SIZE 18
+#define RTE_ETHER_ADDR_FMT_SIZE 18
/**
* Format 48bits Ethernet address in pattern xx:xx:xx:xx:xx:xx.
*
} __attribute__((__packed__));
/* Ethernet frame types */
-#define ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */
-#define ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */
-#define ETHER_TYPE_ARP 0x0806 /**< Arp Protocol. */
-#define ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */
-#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
-#define ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */
+#define RTE_ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */
+#define RTE_ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */
+#define RTE_ETHER_TYPE_ARP 0x0806 /**< Arp Protocol. */
+#define RTE_ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */
+#define RTE_ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
+#define RTE_ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */
#define ETHER_TYPE_PPPOE_DISCOVERY 0x8863 /**< PPPoE Discovery Stage. */
#define ETHER_TYPE_PPPOE_SESSION 0x8864 /**< PPPoE Session Stage. */
-#define ETHER_TYPE_ETAG 0x893F /**< IEEE 802.1BR E-Tag. */
-#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
-#define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
-#define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
-#define ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */
-#define ETHER_TYPE_MPLS 0x8847 /**< MPLS ethertype. */
-#define ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */
-
-#define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr))
-/**< VXLAN tunnel header length. */
+#define RTE_ETHER_TYPE_ETAG 0x893F /**< IEEE 802.1BR E-Tag. */
+#define RTE_ETHER_TYPE_1588 0x88F7
+ /**< IEEE 802.1AS 1588 Precise Time Protocol. */
+#define RTE_ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
+#define RTE_ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
+#define RTE_ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */
+#define RTE_ETHER_TYPE_MPLS 0x8847 /**< MPLS ethertype. */
+#define RTE_ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */
+
+#define RTE_ETHER_VXLAN_HLEN \
+ (sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr))
+ /**< VXLAN tunnel header length. */
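
Both tunnel-header lengths are plain sums; the VXLAN case is the 8-byte UDP header plus the 8-byte VXLAN header. Illustrative check (assuming C11):

    _Static_assert(RTE_ETHER_VXLAN_HLEN == 16, "UDP (8) + VXLAN (8)");
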
/**
* VXLAN-GPE protocol header (draft-ietf-nvo3-vxlan-gpe-05).
} __attribute__((__packed__));
/* VXLAN-GPE next protocol types */
-#define VXLAN_GPE_TYPE_IPV4 1 /**< IPv4 Protocol. */
-#define VXLAN_GPE_TYPE_IPV6 2 /**< IPv6 Protocol. */
-#define VXLAN_GPE_TYPE_ETH 3 /**< Ethernet Protocol. */
-#define VXLAN_GPE_TYPE_NSH 4 /**< NSH Protocol. */
-#define VXLAN_GPE_TYPE_MPLS 5 /**< MPLS Protocol. */
-#define VXLAN_GPE_TYPE_GBP 6 /**< GBP Protocol. */
-#define VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */
-
-#define ETHER_VXLAN_GPE_HLEN (sizeof(struct udp_hdr) + \
+#define RTE_VXLAN_GPE_TYPE_IPV4 1 /**< IPv4 Protocol. */
+#define RTE_VXLAN_GPE_TYPE_IPV6 2 /**< IPv6 Protocol. */
+#define RTE_VXLAN_GPE_TYPE_ETH 3 /**< Ethernet Protocol. */
+#define RTE_VXLAN_GPE_TYPE_NSH 4 /**< NSH Protocol. */
+#define RTE_VXLAN_GPE_TYPE_MPLS 5 /**< MPLS Protocol. */
+#define RTE_VXLAN_GPE_TYPE_GBP 6 /**< GBP Protocol. */
+#define RTE_VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */
+
+#define RTE_ETHER_VXLAN_GPE_HLEN (sizeof(struct udp_hdr) + \
sizeof(struct rte_vxlan_gpe_hdr))
/**< VXLAN-GPE tunnel header length. */
= rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
struct rte_vlan_hdr *vh;
- if (eh->ether_type != rte_cpu_to_be_16(ETHER_TYPE_VLAN))
+ if (eh->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
return -1;
vh = (struct rte_vlan_hdr *)(eh + 1);
/* Copy ether header over rather than moving whole packet */
memmove(rte_pktmbuf_adj(m, sizeof(struct rte_vlan_hdr)),
- eh, 2 * ETHER_ADDR_LEN);
+ eh, 2 * RTE_ETHER_ADDR_LEN);
return 0;
}
if (nh == NULL)
return -ENOSPC;
- memmove(nh, oh, 2 * ETHER_ADDR_LEN);
- nh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ memmove(nh, oh, 2 * RTE_ETHER_ADDR_LEN);
+ nh->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
vh = (struct rte_vlan_hdr *) (nh + 1);
vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
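
Both helpers above move only the two 6-byte MAC addresses (2 * RTE_ETHER_ADDR_LEN) and splice the 4-byte 802.1Q header in or out immediately after them, leaving the payload in place. A hedged usage sketch of the insert path (assumes m is a writable, untagged mbuf with 4 bytes of headroom):

    m->vlan_tci = 100;               /* TCI to write into the new tag */
    if (rte_vlan_insert(&m) == 0) {
        /* frame is now: dst(6) | src(6) | 0x8100 | TCI | orig type | ... */
    }
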
*off += opt_len[flags];
*proto = gh->proto;
- if (*proto == rte_cpu_to_be_16(ETHER_TYPE_TEB))
+ if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))
return RTE_PTYPE_TUNNEL_NVGRE;
else
return RTE_PTYPE_TUNNEL_GRE;
}
case IPPROTO_IPIP:
- *proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ *proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
return RTE_PTYPE_TUNNEL_IP;
case IPPROTO_IPV6:
- *proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ *proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
default:
return 0;
if ((layers & RTE_PTYPE_L2_MASK) == 0)
return 0;
- if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
goto l3; /* fast path if packet is IPv4 */
- if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+ if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
const struct rte_vlan_hdr *vh;
struct rte_vlan_hdr vh_copy;
off += sizeof(*vh);
hdr_lens->l2_len += sizeof(*vh);
proto = vh->eth_proto;
- } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
+ } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
const struct rte_vlan_hdr *vh;
struct rte_vlan_hdr vh_copy;
off += 2 * sizeof(*vh);
hdr_lens->l2_len += 2 * sizeof(*vh);
proto = vh->eth_proto;
- } else if ((proto == rte_cpu_to_be_16(ETHER_TYPE_MPLS)) ||
- (proto == rte_cpu_to_be_16(ETHER_TYPE_MPLSM))) {
+ } else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||
+ (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {
unsigned int i;
const struct mpls_hdr *mh;
struct mpls_hdr mh_copy;
if ((layers & RTE_PTYPE_L3_MASK) == 0)
return pkt_type;
- if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
const struct ipv4_hdr *ip4h;
struct ipv4_hdr ip4h_copy;
}
proto = ip4h->next_proto_id;
pkt_type |= ptype_l4(proto);
- } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
const struct ipv6_hdr *ip6h;
struct ipv6_hdr ip6h_copy;
int frag = 0;
return pkt_type;
hdr_lens->inner_l2_len = 0;
- if (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
if (unlikely(eh == NULL))
return pkt_type;
hdr_lens->inner_l2_len = sizeof(*eh);
}
- if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+ if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
const struct rte_vlan_hdr *vh;
struct rte_vlan_hdr vh_copy;
off += sizeof(*vh);
hdr_lens->inner_l2_len += sizeof(*vh);
proto = vh->eth_proto;
- } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
+ } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
const struct rte_vlan_hdr *vh;
struct rte_vlan_hdr vh_copy;
if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
return pkt_type;
- if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
const struct ipv4_hdr *ip4h;
struct ipv4_hdr ip4h_copy;
}
proto = ip4h->next_proto_id;
pkt_type |= ptype_inner_l4(proto);
- } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
const struct ipv6_hdr *ip6h;
struct ipv6_hdr ip6h_copy;
int frag = 0;
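
These parser hunks are from DPDK's software packet-type scanner, which applications normally reach through rte_net_get_ptype(). Hedged usage sketch:

    #include <rte_net.h>

    struct rte_net_hdr_lens lens;
    uint32_t ptype = rte_net_get_ptype(m, &lens, RTE_PTYPE_ALL_MASK);

    if (RTE_ETH_IS_IPV4_HDR(ptype)) {
        /* lens.l2_len and lens.l3_len now describe the outer headers */
    }
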
{
struct encap_ether_data *d = data;
uint16_t ethertype = (common_cfg->ip_version) ?
- ETHER_TYPE_IPv4 :
- ETHER_TYPE_IPv6;
+ RTE_ETHER_TYPE_IPv4 :
+ RTE_ETHER_TYPE_IPv6;
/* Ethernet */
rte_ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
{
struct encap_vlan_data *d = data;
uint16_t ethertype = (common_cfg->ip_version) ?
- ETHER_TYPE_IPv4 :
- ETHER_TYPE_IPv6;
+ RTE_ETHER_TYPE_IPv4 :
+ RTE_ETHER_TYPE_IPv6;
/* Ethernet */
rte_ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
rte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
/* VLAN */
d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
{
struct encap_qinq_data *d = data;
uint16_t ethertype = (common_cfg->ip_version) ?
- ETHER_TYPE_IPv4 :
- ETHER_TYPE_IPv6;
+ RTE_ETHER_TYPE_IPv4 :
+ RTE_ETHER_TYPE_IPv6;
/* Ethernet */
rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);
/* SVLAN */
d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
p->qinq.svlan.dei,
p->qinq.svlan.vid));
- d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
+ d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
/* CVLAN */
d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
/* Ethernet */
rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
/* SVLAN */
d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
p->qinq.svlan.dei,
p->qinq.svlan.vid));
- d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
+ d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
/* CVLAN */
d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
&d->ether.d_addr);
rte_ether_addr_copy(&p->vxlan.ether.sa,
&d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
/* VLAN */
d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
p->vxlan.vlan.dei,
p->vxlan.vlan.vid));
- d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
+ d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPv4);
/* IPv4*/
d->ipv4.version_ihl = 0x45;
&d->ether.d_addr);
rte_ether_addr_copy(&p->vxlan.ether.sa,
&d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPv4);
/* IPv4*/
d->ipv4.version_ihl = 0x45;
&d->ether.d_addr);
rte_ether_addr_copy(&p->vxlan.ether.sa,
&d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
/* VLAN */
d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
p->vxlan.vlan.dei,
p->vxlan.vlan.vid));
- d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
+ d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPv6);
/* IPv6*/
d->ipv6.vtc_flow = rte_htonl((6 << 28) |
&d->ether.d_addr);
rte_ether_addr_copy(&p->vxlan.ether.sa,
&d->ether.s_addr);
- d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPv6);
/* IPv6*/
d->ipv6.vtc_flow = rte_htonl((6 << 28) |
{
uint8_t *pcap_dumper = (port->dumper);
struct pcap_pkthdr pcap_hdr;
- uint8_t jumbo_pkt_buf[ETHER_MAX_JUMBO_FRAME_LEN];
+ uint8_t jumbo_pkt_buf[RTE_ETHER_MAX_JUMBO_FRAME_LEN];
uint8_t *pkt;
/* Maximum num packets already reached */
struct rte_mbuf *jumbo_mbuf;
uint32_t pkt_index = 0;
- /* if packet size longer than ETHER_MAX_JUMBO_FRAME_LEN,
+ /* if packet size longer than RTE_ETHER_MAX_JUMBO_FRAME_LEN,
* ignore it.
*/
- if (mbuf->pkt_len > ETHER_MAX_JUMBO_FRAME_LEN)
+ if (mbuf->pkt_len > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return;
for (jumbo_mbuf = mbuf; jumbo_mbuf != NULL;
m->l2_len = sizeof(struct rte_ether_hdr);
ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
- if (ethertype == ETHER_TYPE_VLAN) {
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
struct rte_vlan_hdr *vlan_hdr =
(struct rte_vlan_hdr *)(eth_hdr + 1);
l3_hdr = (char *)eth_hdr + m->l2_len;
switch (ethertype) {
- case ETHER_TYPE_IPv4:
+ case RTE_ETHER_TYPE_IPv4:
ipv4_hdr = l3_hdr;
*l4_proto = ipv4_hdr->next_proto_id;
m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
*l4_hdr = (char *)l3_hdr + m->l3_len;
m->ol_flags |= PKT_TX_IPV4;
break;
- case ETHER_TYPE_IPv6:
+ case RTE_ETHER_TYPE_IPv6:
ipv6_hdr = l3_hdr;
*l4_proto = ipv6_hdr->proto;
m->l3_len = sizeof(struct ipv6_hdr);