if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
ipv4_hdr = l3_hdr;
- ol_flags |= PKT_TX_IPV4;
+ ol_flags |= RTE_MBUF_F_TX_IPV4;
if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
- ol_flags |= PKT_TX_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
} else {
if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
- ol_flags |= PKT_TX_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
} else if (ipv4_hdr->hdr_checksum != 0) {
ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum =
	rte_ipv4_cksum(ipv4_hdr);
}
}
} else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
- ol_flags |= PKT_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
else
return 0; /* packet type not supported, nothing to do */
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
- ol_flags |= PKT_TX_UDP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
} else {
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum =
	get_udptcp_checksum(l3_hdr, udp_hdr, info->ethertype);
}
}
if (info->gso_enable)
- ol_flags |= PKT_TX_UDP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
if (tso_segsz)
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
- ol_flags |= PKT_TX_TCP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
} else if (tcp_hdr->cksum != 0) {
tcp_hdr->cksum = 0;
tcp_hdr->cksum =
	get_udptcp_checksum(l3_hdr, tcp_hdr,
		info->ethertype);
}
if (info->gso_enable)
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
} else if (info->l4_proto == IPPROTO_SCTP) {
sctp_hdr = (struct rte_sctp_hdr *)
((char *)l3_hdr + info->l3_len);
/* sctp payload must be a multiple of 4 to be
 * offloaded */
if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
((ipv4_hdr->total_length & 0x3) == 0)) {
- ol_flags |= PKT_TX_SCTP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
} else if (sctp_hdr->cksum != 0) {
sctp_hdr->cksum = 0;
/* XXX implement CRC32c, example available in
 * RFC3309 */
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
ipv4_hdr->hdr_checksum = 0;
- ol_flags |= PKT_TX_OUTER_IPV4;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
- ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
} else
- ol_flags |= PKT_TX_OUTER_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6;
if (info->outer_l4_proto != IPPROTO_UDP)
return ol_flags;
udp_hdr = (struct rte_udp_hdr *)
	((char *)outer_l3_hdr + info->outer_l3_len);
if (tso_enabled)
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
/* Skip SW outer UDP checksum generation if HW supports it */
if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
udp_hdr->dgram_cksum
= rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
- ol_flags |= PKT_TX_OUTER_UDP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
return ol_flags;
}
info.is_tunnel = 0;
info.pkt_len = rte_pktmbuf_pkt_len(m);
tx_ol_flags = m->ol_flags &
- (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
+ (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL);
rx_ol_flags = m->ol_flags;
/* Update the L3/L4 checksum error packet statistics */
- if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
+ if ((rx_ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD)
rx_bad_ip_csum += 1;
- if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
+ if ((rx_ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
rx_bad_l4_csum += 1;
- if (rx_ol_flags & PKT_RX_OUTER_L4_CKSUM_BAD)
+ if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD)
rx_bad_outer_l4_csum += 1;
- if (rx_ol_flags & PKT_RX_OUTER_IP_CKSUM_BAD)
+ if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)
rx_bad_outer_ip_csum += 1;
/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
 * and inner headers */
udp_hdr = (struct rte_udp_hdr *)
	((char *)l3_hdr + info.l3_len);
parse_gtp(udp_hdr, &info);
if (info.is_tunnel) {
- tx_ol_flags |= PKT_TX_TUNNEL_GTP;
+ tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GTP;
goto tunnel_update;
}
parse_vxlan_gpe(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |=
- PKT_TX_TUNNEL_VXLAN_GPE;
+ RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE;
goto tunnel_update;
}
parse_vxlan(udp_hdr, &info,
m->packet_type);
if (info.is_tunnel) {
tx_ol_flags |=
- PKT_TX_TUNNEL_VXLAN;
+ RTE_MBUF_F_TX_TUNNEL_VXLAN;
goto tunnel_update;
}
parse_geneve(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |=
- PKT_TX_TUNNEL_GENEVE;
+ RTE_MBUF_F_TX_TUNNEL_GENEVE;
goto tunnel_update;
}
} else if (info.l4_proto == IPPROTO_GRE) {
gre_hdr = (struct simple_gre_hdr *)
	((char *)l3_hdr + info.l3_len);
parse_gre(gre_hdr, &info);
if (info.is_tunnel)
- tx_ol_flags |= PKT_TX_TUNNEL_GRE;
+ tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GRE;
} else if (info.l4_proto == IPPROTO_IPIP) {
void *encap_ip_hdr;
encap_ip_hdr = (char *)l3_hdr + info.l3_len;
parse_encap_ip(encap_ip_hdr, &info);
if (info.is_tunnel)
- tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
+ tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_IPIP;
}
}
if (info.is_tunnel == 1) {
tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
tx_offloads,
- !!(tx_ol_flags & PKT_TX_TCP_SEG));
+ !!(tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG));
}
/* step 3: fill the mbuf meta data (flags and header lengths) */
if (info.tunnel_tso_segsz ||
    (tx_offloads &
     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
- (tx_ol_flags & PKT_TX_OUTER_IPV6)) {
+ (tx_ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
m->l2_len = info.l2_len;
"l4_proto=%d l4_len=%d flags=%s\n",
info.l2_len, rte_be_to_cpu_16(info.ethertype),
info.l3_len, info.l4_proto, info.l4_len, buf);
- if (rx_ol_flags & PKT_RX_LRO)
+ if (rx_ol_flags & RTE_MBUF_F_RX_LRO)
printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
if (info.is_tunnel == 1)
printf("rx: outer_l2_len=%d outer_ethertype=%x "
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
- (tx_ol_flags & PKT_TX_OUTER_IPV6))
+ (tx_ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",
m->outer_l2_len,
m->outer_l3_len);
if (info.tunnel_tso_segsz != 0 &&
- (m->ol_flags & PKT_TX_TCP_SEG))
+ (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
printf("tx: m->tso_segsz=%d\n",
m->tso_segsz);
} else if (info.tso_segsz != 0 &&
- (m->ol_flags & PKT_TX_TCP_SEG))
+ (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
printf("tx: flags=%s", buf);
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags |= PKT_TX_VLAN_PKT;
+ ol_flags |= RTE_MBUF_F_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= RTE_MBUF_F_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
- ol_flags |= PKT_TX_MACSEC;
+ ol_flags |= RTE_MBUF_F_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
if (!nb_pkt || !nb_clones) {
sizeof(*ip_hdr));
pkt->nb_segs = 1;
pkt->pkt_len = pkt_size;
- pkt->ol_flags &= EXT_ATTACHED_MBUF;
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
pkt->ol_flags |= ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
- if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
+ if (! (mb->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP)) {
if (eth_type == RTE_ETHER_TYPE_1588) {
printf("Port %u Received PTP packet not filtered"
" by hardware\n",
* Check that the received PTP packet has been timestamped by the
* hardware.
*/
- if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) {
+ if (! (mb->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST)) {
printf("Port %u Received PTP packet not timestamped"
" by hardware\n",
fs->rx_port);
rte_ether_addr_copy(&addr, &eth_hdr->s_addr);
/* Forward PTP packet with hardware TX timestamp */
- mb->ol_flags |= PKT_TX_IEEE1588_TMST;
+ mb->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
fs->tx_packets += 1;
if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
printf("Port %u sent PTP packet dropped\n", fs->rx_port);
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
+ ol_flags = RTE_MBUF_F_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= RTE_MBUF_F_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
- ol_flags |= PKT_TX_MACSEC;
+ ol_flags |= RTE_MBUF_F_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
&eth_hdr->d_addr);
rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
- mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
mb->l3_len = sizeof(struct rte_ipv4_hdr);
uint64_t ol_flags = 0;
ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
- PKT_TX_VLAN : 0;
+ RTE_MBUF_F_TX_VLAN : 0;
ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
- PKT_TX_QINQ : 0;
+ RTE_MBUF_F_TX_QINQ : 0;
ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
- PKT_TX_MACSEC : 0;
+ RTE_MBUF_F_TX_MACSEC : 0;
return ol_flags;
}
{
int i;
- if (ol_flags & PKT_TX_VLAN)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN)
for (i = 0; i < nb; i++)
pkts[i]->vlan_tci = vlan;
- if (ol_flags & PKT_TX_QINQ)
+ if (ol_flags & RTE_MBUF_F_TX_QINQ)
for (i = 0; i < nb; i++)
pkts[i]->vlan_tci_outer = outer_vlan;
}
static inline void
mbuf_field_set(struct rte_mbuf *mb, uint64_t ol_flags)
{
- mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
mb->l3_len = sizeof(struct rte_ipv4_hdr);
rte_pktmbuf_reset_headroom(pkt);
pkt->data_len = tx_pkt_seg_lengths[0];
- pkt->ol_flags &= EXT_ATTACHED_MBUF;
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
pkt->ol_flags |= ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
+ ol_flags = RTE_MBUF_F_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= RTE_MBUF_F_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
- ol_flags |= PKT_TX_MACSEC;
+ ol_flags |= RTE_MBUF_F_TX_MACSEC;
/*
* Initialize Ethernet header.
*/
eth_type, (unsigned int) mb->pkt_len,
(int)mb->nb_segs);
ol_flags = mb->ol_flags;
- if (ol_flags & PKT_RX_RSS_HASH) {
+ if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
MKDUMPSTR(print_buf, buf_size, cur_len,
" - RSS hash=0x%x",
(unsigned int) mb->hash.rss);
MKDUMPSTR(print_buf, buf_size, cur_len,
" - RSS queue=0x%x", (unsigned int) queue);
}
- if (ol_flags & PKT_RX_FDIR) {
+ if (ol_flags & RTE_MBUF_F_RX_FDIR) {
MKDUMPSTR(print_buf, buf_size, cur_len,
" - FDIR matched ");
- if (ol_flags & PKT_RX_FDIR_ID)
+ if (ol_flags & RTE_MBUF_F_RX_FDIR_ID)
MKDUMPSTR(print_buf, buf_size, cur_len,
"ID=0x%x", mb->hash.fdir.hi);
- else if (ol_flags & PKT_RX_FDIR_FLX)
+ else if (ol_flags & RTE_MBUF_F_RX_FDIR_FLX)
MKDUMPSTR(print_buf, buf_size, cur_len,
"flex bytes=0x%08x %08x",
mb->hash.fdir.hi, mb->hash.fdir.lo);
if (is_timestamp_enabled(mb))
MKDUMPSTR(print_buf, buf_size, cur_len,
" - timestamp %"PRIu64" ", get_timestamp(mb));
- if (ol_flags & PKT_RX_QINQ)
+ if (ol_flags & RTE_MBUF_F_RX_QINQ)
MKDUMPSTR(print_buf, buf_size, cur_len,
" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
mb->vlan_tci, mb->vlan_tci_outer);
- else if (ol_flags & PKT_RX_VLAN)
+ else if (ol_flags & RTE_MBUF_F_RX_VLAN)
MKDUMPSTR(print_buf, buf_size, cur_len,
" - VLAN tci=0x%x", mb->vlan_tci);
- if (!is_rx && (ol_flags & PKT_TX_DYNF_METADATA))
+ if (!is_rx && (ol_flags & RTE_MBUF_F_TX_DYNF_METADATA))
MKDUMPSTR(print_buf, buf_size, cur_len,
" - Tx metadata: 0x%x",
*RTE_FLOW_DYNF_METADATA(mb));
- if (is_rx && (ol_flags & PKT_RX_DYNF_METADATA))
+ if (is_rx && (ol_flags & RTE_MBUF_F_RX_DYNF_METADATA))
MKDUMPSTR(print_buf, buf_size, cur_len,
" - Rx metadata: 0x%x",
*RTE_FLOW_DYNF_METADATA(mb));
for (i = 0; i < nb_pkts; i++) {
*RTE_FLOW_DYNF_METADATA(pkts[i]) =
ports[port_id].tx_metadata;
- pkts[i]->ol_flags |= PKT_TX_DYNF_METADATA;
+ pkts[i]->ol_flags |= RTE_MBUF_F_TX_DYNF_METADATA;
}
return nb_pkts;
}
"ibuf pkt_len is not equal to obuf pkt_len");
/* check mbuf ol_flags */
- TEST_ASSERT(ut_params->ibuf[j]->ol_flags & PKT_TX_SEC_OFFLOAD,
- "ibuf PKT_TX_SEC_OFFLOAD is not set");
+ TEST_ASSERT(ut_params->ibuf[j]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD,
+ "ibuf RTE_MBUF_F_TX_SEC_OFFLOAD is not set");
}
return 0;
}
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Test case to check with zero buffer len */
- ret = rte_get_rx_ol_flag_list(PKT_RX_L4_CKSUM_MASK, buf, 0);
+ ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
if (ret != -1)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
"non-zero, buffer should not be empty");
/* Test case to check with valid mask value */
- ret = rte_get_rx_ol_flag_list(PKT_RX_SEC_OFFLOAD, buf, sizeof(buf));
+ ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
+ sizeof(buf));
if (ret != 0)
GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Test case to check with zero buffer len */
- ret = rte_get_tx_ol_flag_list(PKT_TX_IP_CKSUM, buf, 0);
+ ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
if (ret != -1)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
"non-zero, buffer should not be empty");
/* Test case to check with valid mask value */
- ret = rte_get_tx_ol_flag_list(PKT_TX_UDP_CKSUM, buf, sizeof(buf));
+ ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
+ sizeof(buf));
if (ret != 0)
GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
uint16_t i;
const char *flag_str = NULL;
const struct flag_name rx_flags[] = {
- VAL_NAME(PKT_RX_VLAN),
- VAL_NAME(PKT_RX_RSS_HASH),
- VAL_NAME(PKT_RX_FDIR),
- VAL_NAME(PKT_RX_L4_CKSUM_BAD),
- VAL_NAME(PKT_RX_L4_CKSUM_GOOD),
- VAL_NAME(PKT_RX_L4_CKSUM_NONE),
- VAL_NAME(PKT_RX_IP_CKSUM_BAD),
- VAL_NAME(PKT_RX_IP_CKSUM_GOOD),
- VAL_NAME(PKT_RX_IP_CKSUM_NONE),
- VAL_NAME(PKT_RX_OUTER_IP_CKSUM_BAD),
- VAL_NAME(PKT_RX_VLAN_STRIPPED),
- VAL_NAME(PKT_RX_IEEE1588_PTP),
- VAL_NAME(PKT_RX_IEEE1588_TMST),
- VAL_NAME(PKT_RX_FDIR_ID),
- VAL_NAME(PKT_RX_FDIR_FLX),
- VAL_NAME(PKT_RX_QINQ_STRIPPED),
- VAL_NAME(PKT_RX_LRO),
- VAL_NAME(PKT_RX_SEC_OFFLOAD),
- VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED),
- VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD),
- VAL_NAME(PKT_RX_OUTER_L4_CKSUM_GOOD),
- VAL_NAME(PKT_RX_OUTER_L4_CKSUM_INVALID),
+ VAL_NAME(RTE_MBUF_F_RX_VLAN),
+ VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
+ VAL_NAME(RTE_MBUF_F_RX_FDIR),
+ VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
+ VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
+ VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
+ VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
+ VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
+ VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
+ VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
+ VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
+ VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
+ VAL_NAME(RTE_MBUF_F_RX_LRO),
+ VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
+ VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
};
/* Test case to check with valid flag */
uint16_t i;
const char *flag_str = NULL;
const struct flag_name tx_flags[] = {
- VAL_NAME(PKT_TX_VLAN),
- VAL_NAME(PKT_TX_IP_CKSUM),
- VAL_NAME(PKT_TX_TCP_CKSUM),
- VAL_NAME(PKT_TX_SCTP_CKSUM),
- VAL_NAME(PKT_TX_UDP_CKSUM),
- VAL_NAME(PKT_TX_IEEE1588_TMST),
- VAL_NAME(PKT_TX_TCP_SEG),
- VAL_NAME(PKT_TX_IPV4),
- VAL_NAME(PKT_TX_IPV6),
- VAL_NAME(PKT_TX_OUTER_IP_CKSUM),
- VAL_NAME(PKT_TX_OUTER_IPV4),
- VAL_NAME(PKT_TX_OUTER_IPV6),
- VAL_NAME(PKT_TX_TUNNEL_VXLAN),
- VAL_NAME(PKT_TX_TUNNEL_GRE),
- VAL_NAME(PKT_TX_TUNNEL_IPIP),
- VAL_NAME(PKT_TX_TUNNEL_GENEVE),
- VAL_NAME(PKT_TX_TUNNEL_MPLSINUDP),
- VAL_NAME(PKT_TX_TUNNEL_VXLAN_GPE),
- VAL_NAME(PKT_TX_TUNNEL_IP),
- VAL_NAME(PKT_TX_TUNNEL_UDP),
- VAL_NAME(PKT_TX_QINQ),
- VAL_NAME(PKT_TX_MACSEC),
- VAL_NAME(PKT_TX_SEC_OFFLOAD),
- VAL_NAME(PKT_TX_UDP_SEG),
- VAL_NAME(PKT_TX_OUTER_UDP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_VLAN),
+ VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
+ VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
+ VAL_NAME(RTE_MBUF_F_TX_IPV4),
+ VAL_NAME(RTE_MBUF_F_TX_IPV6),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
+ VAL_NAME(RTE_MBUF_F_TX_QINQ),
+ VAL_NAME(RTE_MBUF_F_TX_MACSEC),
+ VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
+ VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
};
/* Test case to check with valid flag */
/* test to validate if IP checksum is counted only for IPV4 packet */
/* set both IP checksum and IPV6 flags */
- ol_flags |= PKT_TX_IP_CKSUM;
- ol_flags |= PKT_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
ol_flags = 0;
/* test to validate if IP type is set when required */
- ol_flags |= PKT_TX_L4_MASK;
+ ol_flags |= RTE_MBUF_F_TX_L4_MASK;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
/* test if IP type is set when TCP SEG is on */
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
ol_flags = 0;
/* test to confirm IP type (IPV4/IPV6) is set */
- ol_flags = PKT_TX_L4_MASK;
- ol_flags |= PKT_TX_IPV6;
+ ol_flags = RTE_MBUF_F_TX_L4_MASK;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
pktmbuf_pool,
ol_flags, 0, 0) < 0)
ol_flags = 0;
/* test to check TSO segment size is non-zero */
- ol_flags |= PKT_TX_IPV4;
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_IPV4;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
/* set 0 tso segment size */
if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
- /* retain IPV4 and PKT_TX_TCP_SEG mask */
+ /* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
/* set valid tso segment size but IP CKSUM not set */
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
pktmbuf_pool,
/* test to validate if IP checksum is set for TSO capability */
/* retain IPV4, TCP_SEG, tso_seg size */
- ol_flags |= PKT_TX_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
/* test to confirm TSO for IPV6 type */
ol_flags = 0;
- ol_flags |= PKT_TX_IPV6;
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
ol_flags = 0;
/* test if outer IP checksum set for non outer IPv4 packet */
- ol_flags |= PKT_TX_IPV6;
- ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
pktmbuf_pool,
ol_flags, 512, -EINVAL) < 0)
ol_flags = 0;
/* test to confirm outer IP checksum is set for outer IPV4 packet */
- ol_flags |= PKT_TX_OUTER_IP_CKSUM;
- ol_flags |= PKT_TX_OUTER_IPV4;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
buf_iova = rte_mem_virt2iova(ext_buf_addr);
rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
ret_shinfo);
- if (m->ol_flags != EXT_ATTACHED_MBUF)
+ if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
__func__);
/* attach the same external buffer to the cloned mbuf */
rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
ret_shinfo);
- if (clone->ol_flags != EXT_ATTACHED_MBUF)
+ if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
__func__);
flag2, strerror(errno));
flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
- rte_bsf64(PKT_LAST_FREE));
- if (flag3 != rte_bsf64(PKT_LAST_FREE))
+ rte_bsf64(RTE_MBUF_F_LAST_FREE));
+ if (flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
flag3, strerror(errno));
// enable VLAN insert offload
testpmd> port config (port_id) tx_offload vlan_insert|qinq_insert (on|off)
- if (mbuf->ol_flags && PKT_TX_QINQ) // case-1: insert VLAN to single-tagged packet
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_QINQ) // case-1: insert VLAN to single-tagged packet
tci_value = mbuf->vlan_tci_outer
- else if (mbuf->ol_flags && PKT_TX_VLAN) // case-2: insert VLAN to untagged packet
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) // case-2: insert VLAN to untagged packet
tci_value = mbuf->vlan_tci
VLAN Strip
testpmd> port config (port_id) rx_offload vlan_strip (on|off)
// notify application VLAN strip via mbuf
- mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_STRIPPED // outer VLAN is found and stripped
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED // outer VLAN is found and stripped
mbuf->vlan_tci = tci_value // TCI of the stripped VLAN
Time Synchronization
.. code-block:: console
// RX packet completion will indicate whether the packet is PTP
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
Statistics Collection
~~~~~~~~~~~~~~~~~~~~~
- Rx checksum offloads.
The NIC validates IPv4/UDP/TCP checksums of both inner and outer packets.
- Good checksum flags (e.g. ``PKT_RX_L4_CKSUM_GOOD``) indicate that the inner
+ Good checksum flags (e.g. ``RTE_MBUF_F_RX_L4_CKSUM_GOOD``) indicate that the inner
packet has the correct checksum, and if applicable, the outer packet also
- has the correct checksum. Bad checksum flags (e.g. ``PKT_RX_L4_CKSUM_BAD``)
+ has the correct checksum. Bad checksum flags (e.g. ``RTE_MBUF_F_RX_L4_CKSUM_BAD``)
indicate that the inner and/or outer packets have invalid checksum values.
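As a minimal illustration (not from this guide), an application might act on
these flags as follows; ``rx_l4_csum_ok()`` is a hypothetical helper:

.. code-block:: c

    #include <rte_mbuf.h>

    /* Classify the L4 checksum status reported by the NIC: 1 means
     * verified good, 0 means verified bad, -1 means the application
     * must verify the checksum in software (UNKNOWN/NONE). */
    static inline int
    rx_l4_csum_ok(const struct rte_mbuf *m)
    {
        switch (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) {
        case RTE_MBUF_F_RX_L4_CKSUM_GOOD:
            return 1;
        case RTE_MBUF_F_RX_L4_CKSUM_BAD:
            return 0;
        default:
            return -1;
        }
    }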
- Inner Rx packet type classification
Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
packets with the default VLAN tag are stripped by the adapter and presented to
-DPDK as untagged packets. In this case mbuf->vlan_tci and the PKT_RX_VLAN and
-PKT_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
+DPDK as untagged packets. In this case mbuf->vlan_tci and the RTE_MBUF_F_RX_VLAN and
+RTE_MBUF_F_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
``devargs`` parameter ``ig-vlan-rewrite=untag``. For example::
-a 12:00.0,ig-vlan-rewrite=untag
``dev_conf.rxmode.max_lro_pkt_size``.
* **[implements] datapath**: ``LRO functionality``.
* **[implements] rte_eth_dev_data**: ``lro``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_LRO``, ``mbuf.tso_segsz``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
* **[provides] rte_eth_dev_info**: ``max_lro_pkt_size``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
-* **[uses] mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
+* **[uses] mbuf**: ``mbuf.ol_flags:`` ``RTE_MBUF_F_TX_TCP_SEG``, ``RTE_MBUF_F_TX_IPV4``, ``RTE_MBUF_F_TX_IPV6``, ``RTE_MBUF_F_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
* **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_RSS_HASH``, ``mbuf.rss``.
.. _nic_features_inner_rss:
* **[uses] rte_flow_action_rss**: ``level``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_RSS_HASH``, ``mbuf.rss``.
.. _nic_features_rss_key_update:
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
- ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
``capabilities_get``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
- ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_VLAN``, ``mbuf.vlan_tci``.
* **[implements] eth_dev_ops**: ``vlan_offload_set``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN`` ``mbuf.vlan_tci``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
- ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_QINQ``, ``mbuf.vlan_tci_outer``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:RTE_MBUF_F_RX_QINQ``,
+ ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN``
``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
- ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_IP_CKSUM``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_IPV4`` | ``RTE_MBUF_F_TX_IPV6``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
- ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
- ``PKT_RX_IP_CKSUM_NONE``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN`` |
+ ``RTE_MBUF_F_RX_IP_CKSUM_BAD`` | ``RTE_MBUF_F_RX_IP_CKSUM_GOOD`` |
+ ``RTE_MBUF_F_RX_IP_CKSUM_NONE``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
- ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
- ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_IPV4`` | ``RTE_MBUF_F_TX_IPV6``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_L4_NO_CKSUM`` | ``RTE_MBUF_F_TX_TCP_CKSUM`` |
+ ``RTE_MBUF_F_TX_SCTP_CKSUM`` | ``RTE_MBUF_F_TX_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
- ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
- ``PKT_RX_L4_CKSUM_NONE``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN`` |
+ ``RTE_MBUF_F_RX_L4_CKSUM_BAD`` | ``RTE_MBUF_F_RX_L4_CKSUM_GOOD`` |
+ ``RTE_MBUF_F_RX_L4_CKSUM_NONE``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
Supports Timestamp.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.timestamp``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
* **[related] eth_dev_ops**: ``read_clock``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_MACSEC``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
- ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
- ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
- ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_IP_CKSUM``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_IPV4`` | ``RTE_MBUF_F_TX_IPV6``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_IP_CKSUM``,
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_IPV4`` | ``RTE_MBUF_F_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
Supports inner packet L4 checksum.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
-* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
- ``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
+* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN`` |
+ ``RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD`` | ``RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD`` | ``RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
- ``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
+* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_IPV4`` | ``RTE_MBUF_F_TX_OUTER_IPV6``.
+ ``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
Errata: 44 Integrity Error Reported for IPv4/UDP Packets With Zero Checksum
To support UDP zero checksum, packets with a zero or bad UDP checksum are marked as
-PKT_RX_L4_CKSUM_UNKNOWN, so the application needs to recompute the checksum to
+RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN, so the application needs to recompute the checksum to
validate it.
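A hedged sketch of the software fallback this errata calls for, assuming a
non-fragmented IPv4/UDP packet with ``l2_len``/``l3_len`` already parsed
(``rte_ipv4_udptcp_cksum_verify()`` returns 0 for a valid checksum):

.. code-block:: c

    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_udp.h>

    /* Sketch: recompute the UDP checksum when the NIC reported it as
     * unknown; otherwise trust the good/bad flags it set. */
    static int
    udp_csum_ok(struct rte_mbuf *m)
    {
        const struct rte_ipv4_hdr *ip;
        const struct rte_udp_hdr *udp;
        uint64_t csum = m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK;

        if (csum != RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN)
            return csum == RTE_MBUF_F_RX_L4_CKSUM_GOOD;

        ip = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *,
                                     m->l2_len);
        udp = (const struct rte_udp_hdr *)((const char *)ip + m->l3_len);
        if (udp->dgram_cksum == 0)
            return 1; /* zero UDP checksum is legal over IPv4 */
        return rte_ipv4_udptcp_cksum_verify(ip, udp) == 0;
    }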
Inline crypto processing support
no MPRQ feature or vectorized code can be engaged.
- When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be
- externally attached to a user-provided mbuf with having EXT_ATTACHED_MBUF in
+ externally attached to a user-provided mbuf, with RTE_MBUF_F_EXTERNAL set in
ol_flags. As the mempool for the external buffer is managed by PMD, all the
Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
the external buffers will be freed by PMD and the application which still
holds the external buffers may crash.
- If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
- supported. Some Rx packets may not have PKT_RX_RSS_HASH.
+ supported. Some Rx packets may not have RTE_MBUF_F_RX_RSS_HASH.
- IPv6 multicast messages are not supported on VMs when promiscuous mode
  and allmulticast mode are both set to off.
the mbuf by external buffer attachment - ``rte_pktmbuf_attach_extbuf()``.
A mempool for external buffers will be allocated and managed by PMD. If Rx
packet is externally attached, ol_flags field of the mbuf will have
- EXT_ATTACHED_MBUF and this flag must be preserved. ``RTE_MBUF_HAS_EXTBUF()``
+ RTE_MBUF_F_EXTERNAL and this flag must be preserved. ``RTE_MBUF_HAS_EXTBUF()``
checks the flag. The default value is 128, valid only if ``mprq_en`` is set.
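For illustration, a sketch (mirroring the testpmd hunks earlier in this
patch) of rewriting an mbuf's Tx flags without losing the attachment bits
that MPRQ relies on:

.. code-block:: c

    #include <rte_mbuf.h>

    /* Sketch: overwrite Tx offload flags while preserving
     * RTE_MBUF_F_EXTERNAL/RTE_MBUF_F_INDIRECT, so that
     * RTE_MBUF_HAS_EXTBUF(m) can still detect a PMD-managed
     * external buffer. */
    static inline void
    set_tx_ol_flags(struct rte_mbuf *m, uint64_t tx_flags)
    {
        m->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
        m->ol_flags |= tx_flags;
    }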
- ``rxqs_min_mprq`` parameter [int]
responsibility to ensure that these flags are set.
- For example, in order to segment TCP/IPv4 packets, the application should
- add the ``PKT_TX_IPV4`` and ``PKT_TX_TCP_SEG`` flags to the mbuf's
+ add the ``RTE_MBUF_F_TX_IPV4`` and ``RTE_MBUF_F_TX_TCP_SEG`` flags to the mbuf's
ol_flags.
- If checksum calculation in hardware is required, the application should
- also add the ``PKT_TX_TCP_CKSUM`` and ``PKT_TX_IP_CKSUM`` flags.
+ also add the ``RTE_MBUF_F_TX_TCP_CKSUM`` and ``RTE_MBUF_F_TX_IP_CKSUM`` flags.
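A sketch of the flag setup described above for a TCP/IPv4 packet; the
header sizes assume plain, untagged Ethernet and are illustrative only:

.. code-block:: c

    #include <stdbool.h>

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_tcp.h>

    /* Sketch: mark an mbuf for TCP/IPv4 segmentation, optionally
     * delegating checksum computation to hardware. */
    static void
    prepare_for_gso(struct rte_mbuf *m, bool hw_cksum)
    {
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->l4_len = sizeof(struct rte_tcp_hdr);
        m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_SEG;
        if (hw_cksum)
            m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM |
                           RTE_MBUF_F_TX_TCP_CKSUM;
    }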
#. Check if the packet should be processed. Packets with one of the
following properties are not processed and are returned immediately:
On TX side, it is also possible for an application to delegate some
processing to the hardware if it supports it. For instance, the
-PKT_TX_IP_CKSUM flag allows to offload the computation of the IPv4
+RTE_MBUF_F_TX_IP_CKSUM flag allows offloading the computation of the IPv4
checksum.
The following examples explain how to configure different TX offloads on
mb->l2_len = len(out_eth)
mb->l3_len = len(out_ip)
- mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
+ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM
set out_ip checksum to 0 in the packet
This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
mb->l2_len = len(out_eth)
mb->l3_len = len(out_ip)
- mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM | PKT_TX_UDP_CKSUM
+ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM
set out_ip checksum to 0 in the packet
set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
- mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM
+ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM
set in_ip checksum to 0 in the packet
This is similar to case 1), but l2_len is different. It is supported
mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
- mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CSUM | PKT_TX_TCP_CKSUM
+ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
mb->l4_len = len(in_tcp)
- mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
- PKT_TX_TCP_SEG;
+ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM |
+ RTE_MBUF_F_TX_TCP_SEG;
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header without including the IP
payload length using rte_ipv4_phdr_cksum()
mb->outer_l3_len = len(out_ip)
mb->l2_len = len(out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
- mb->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
+ mb->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM;
set out_ip checksum to 0 in the packet
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Latency stats library marks the time in the timestamp field of the
-mbuf for the ingress packets and sets the ``PKT_RX_TIMESTAMP`` flag of
+mbuf for the ingress packets and sets the ``RTE_MBUF_F_RX_TIMESTAMP`` flag of
``ol_flags`` for the mbuf to indicate the marked time as a valid one.
At the egress, the mbufs with the flag set are considered to have a valid
timestamp and are used for the latency calculation.
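As a hedged sketch, assuming the dynamic Rx timestamp field and flag of
recent DPDK releases (registered with
``rte_mbuf_dyn_rx_timestamp_register()``), an application could read the
marked time like this:

.. code-block:: c

    #include <rte_mbuf_dyn.h>

    static int ts_off;       /* byte offset of the timestamp dynfield */
    static uint64_t ts_flag; /* "timestamp valid" dynamic flag mask */

    /* Look the field and flag up once at initialization time. */
    static int
    ts_init(void)
    {
        return rte_mbuf_dyn_rx_timestamp_register(&ts_off, &ts_flag);
    }

    /* Return the marked time, or 0 when the mbuf carries none. */
    static uint64_t
    ts_read(const struct rte_mbuf *m)
    {
        if (!(m->ol_flags & ts_flag))
            return 0;
        return *RTE_MBUF_DYNFIELD(m, ts_off, rte_mbuf_timestamp_t *);
    }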
Matches 32 bit metadata item set.
On egress, metadata can be set either by mbuf metadata field with
-PKT_TX_DYNF_METADATA flag or ``SET_META`` action. On ingress, ``SET_META``
+RTE_MBUF_F_TX_DYNF_METADATA flag or ``SET_META`` action. On ingress, ``SET_META``
action sets metadata for a packet and the metadata will be reported via
-``metadata`` dynamic field of ``rte_mbuf`` with PKT_RX_DYNF_METADATA flag.
+``metadata`` dynamic field of ``rte_mbuf`` with RTE_MBUF_F_RX_DYNF_METADATA flag.
- Default ``mask`` matches the specified Rx metadata value.
Action: ``MARK``
^^^^^^^^^^^^^^^^
-Attaches an integer value to packets and sets ``PKT_RX_FDIR`` and
-``PKT_RX_FDIR_ID`` mbuf flags.
+Attaches an integer value to packets and sets ``RTE_MBUF_F_RX_FDIR`` and
+``RTE_MBUF_F_RX_FDIR_ID`` mbuf flags.
This value is arbitrary and application-defined. Maximum allowed value
depends on the underlying implementation. It is returned in the
``hash.fdir.hi`` mbuf field.
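A minimal sketch of recovering the value on the Rx side, with the flag
names as renamed by this patch:

.. code-block:: c

    #include <rte_mbuf.h>

    /* Sketch: fetch the MARK value attached by a matched flow rule. */
    static inline int
    get_flow_mark(const struct rte_mbuf *m, uint32_t *mark)
    {
        const uint64_t f = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;

        if ((m->ol_flags & f) != f)
            return -1; /* no MARK on this packet */
        *mark = m->hash.fdir.hi;
        return 0;
    }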
^^^^^^^^^^^^^^^^
Flags packets. Similar to `Action: MARK`_ without a specific value; only
-sets the ``PKT_RX_FDIR`` mbuf flag.
+sets the ``RTE_MBUF_F_RX_FDIR`` mbuf flag.
- No configurable properties.
Set metadata. Item ``META`` matches metadata.
-Metadata set by mbuf metadata field with PKT_TX_DYNF_METADATA flag on egress
+Metadata set by mbuf metadata field with RTE_MBUF_F_TX_DYNF_METADATA flag on egress
will be overridden by this action. On ingress, the metadata will be carried by
``metadata`` dynamic field of ``rte_mbuf`` which can be accessed by
-``RTE_FLOW_DYNF_METADATA()``. PKT_RX_DYNF_METADATA flag will be set along
+``RTE_FLOW_DYNF_METADATA()``. RTE_MBUF_F_RX_DYNF_METADATA flag will be set along
with the data.
The mbuf dynamic field must be registered by calling
``rte_flow_dynf_metadata_register()``.
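For illustration, a sketch of the egress path mentioned above (flag name
as renamed by this patch; the value is an arbitrary application-defined
32-bit word):

.. code-block:: c

    #include <rte_flow.h>
    #include <rte_mbuf.h>

    /* Sketch: tag an egress mbuf with 32-bit metadata for SET_META/META
     * matching; registers the dynamic field on first use. */
    static void
    set_tx_metadata(struct rte_mbuf *m, uint32_t value)
    {
        if (!rte_flow_dynf_metadata_avail())
            rte_flow_dynf_metadata_register();
        *RTE_FLOW_DYNF_METADATA(m) = value;
        m->ol_flags |= RTE_MBUF_F_TX_DYNF_METADATA;
    }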
will be limited to maximum 256 queues.
Also compile time flag ``RTE_ETHDEV_QUEUE_STAT_CNTRS`` will be removed.
-* ethdev: The offload flag ``PKT_RX_EIP_CKSUM_BAD`` will be removed and
- replaced by the new flag ``PKT_RX_OUTER_IP_CKSUM_BAD``. The new name is more
+* ethdev: The offload flag ``RTE_MBUF_F_RX_EIP_CKSUM_BAD`` will be removed and
+ replaced by the new flag ``RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD``. The new name is more
consistent with existing outer header checksum status flag naming, which
should help in reducing confusion about its usage.
* mbuf: The mbuf offload flags ``PKT_*`` will be renamed as ``RTE_MBUF_F_*``.
A compatibility layer will be kept until DPDK 22.11, except for the flags
- that are already deprecated (ex: ``PKT_RX_L4_CKSUM_BAD``), which will
+ that are already deprecated (ex: ``RTE_MBUF_F_RX_L4_CKSUM_BAD``), which will
be removed.
``rte_log_get_global_level()``, ``rte_log_set_level()`` and
``rte_log_get_level()``.
-* **Removed mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT.**
+* **Removed mbuf flags RTE_MBUF_F_RX_VLAN_PKT and RTE_MBUF_F_RX_QINQ_PKT.**
- The ``mbuf`` flags ``PKT_RX_VLAN_PKT`` and ``PKT_RX_QINQ_PKT`` have
+ The ``mbuf`` flags ``RTE_MBUF_F_RX_VLAN_PKT`` and ``RTE_MBUF_F_RX_QINQ_PKT`` have
been removed since their behavior was not properly described.
-* **Added mbuf flags PKT_RX_VLAN and PKT_RX_QINQ.**
+* **Added mbuf flags RTE_MBUF_F_RX_VLAN and RTE_MBUF_F_RX_QINQ.**
Two ``mbuf`` flags have been added to indicate that the VLAN
identifier has been saved in the ``mbuf`` structure. For instance:
- - If VLAN is not stripped and TCI is saved: ``PKT_RX_VLAN``
- - If VLAN is stripped and TCI is saved: ``PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED``
+ - If VLAN is not stripped and TCI is saved: ``RTE_MBUF_F_RX_VLAN``
+ - If VLAN is stripped and TCI is saved: ``RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED``
* **Modified the vlan_offload_set_t function prototype in the ethdev library.**
* Tx metadata can also be set by SET_META action of rte_flow.
* Rx metadata is delivered to the host via a dynamic field of ``rte_mbuf``
- with ``PKT_RX_DYNF_METADATA``.
+ with ``RTE_MBUF_F_RX_DYNF_METADATA``.
* **Added ethdev API to set supported packet types.**
* Added new Rx offload flag ``DEV_RX_OFFLOAD_RSS_HASH`` which can be used to
enable/disable PMDs write to ``rte_mbuf::hash::rss``.
* PMDs notify the validity of ``rte_mbuf::hash:rss`` to the application
- by enabling ``PKT_RX_RSS_HASH`` flag in ``rte_mbuf::ol_flags``.
+ by enabling ``RTE_MBUF_F_RX_RSS_HASH`` flag in ``rte_mbuf::ol_flags``.
* **Added Rx/Tx packet burst mode "get" API.**
supported.
* ethdev: the tx_metadata mbuf field is moved to a dynamic one.
- ``PKT_TX_METADATA`` flag is replaced with ``PKT_TX_DYNF_METADATA``.
+ ``RTE_MBUF_F_TX_METADATA`` flag is replaced with ``RTE_MBUF_F_TX_DYNF_METADATA``.
``DEV_TX_OFFLOAD_MATCH_METADATA`` offload flag is removed, now metadata
support in PMD is engaged on dynamic field registration.
* **igb: Fixed IEEE1588 frame identification in I210.**
- Fixed issue where the flag ``PKT_RX_IEEE1588_PTP`` was not being set
+ Fixed issue where the flag ``RTE_MBUF_F_RX_IEEE1588_PTP`` was not being set
in the Intel I210 NIC, as the EtherType in RX descriptor is in bits 8:10 of
Packet Type and not in the default bits 0:2.
if (likely(nb_rx == 0))
continue;
- if (m->ol_flags & PKT_RX_IEEE1588_PTP)
+ if (m->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP)
parse_ptp_frames(portid, m);
rte_pktmbuf_free(m);
.. code-block:: c
- if (m->ol_flags & PKT_RX_IEEE1588_PTP)
+ if (m->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP)
parse_ptp_frames(portid, m);
return lkey;
/* Take slower bottom-half on miss. */
return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(ol_flags & EXT_ATTACHED_MBUF));
+ !!(ol_flags & RTE_MBUF_F_EXTERNAL));
}
static __rte_always_inline uint32_t
return lkey;
/* Take slower bottom-half on miss. */
return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(ol_flags & EXT_ATTACHED_MBUF));
+ !!(ol_flags & RTE_MBUF_F_EXTERNAL));
}
static __rte_always_inline uint32_t
errcode = idx & 0xff;
errlev = (idx & 0x700) >> 8;
- val = PKT_RX_IP_CKSUM_UNKNOWN;
- val |= PKT_RX_L4_CKSUM_UNKNOWN;
- val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+ val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+ val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;
switch (errlev) {
case OCCTX_ERRLEV_RE:
if (errcode) {
- val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
case OCCTX_ERRLEV_LC:
if (errcode == OCCTX_EC_IP4_CSUM) {
- val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
break;
case OCCTX_ERRLEV_LD:
/* Check if parsed packet is neither IPv4 nor IPv6 */
if (errcode == OCCTX_EC_IP4_NOT)
break;
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (errcode == OCCTX_EC_L4_CSUM)
- val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
else
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
case OCCTX_ERRLEV_LE:
if (errcode == OCCTX_EC_IP4_CSUM)
- val |= PKT_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case OCCTX_ERRLEV_LF:
/* Check if parsed packet is neither IPv4 nor IPv6 */
if (errcode == OCCTX_EC_IP4_NOT)
break;
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (errcode == OCCTX_EC_L4_CSUM)
- val |= PKT_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
}
if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
if (likely(wqe->s.w2.vv)) {
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->vlan_tci =
ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
mbuf->data_off + wqe->s.w4.vlptr + 2)));
uint16_t ref_cnt = m->refcnt;
if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
- (m->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
txq = otx2_ssogws_xtract_meta(m, txq_data);
return otx2_sec_event_tx(base, ev, m, txq, flags);
}
/* check for vlan info */
if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
mbuf->vlan_tci = ppd->tp_vlan_tci;
- mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ mbuf->ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
}
/* release incoming frame and advance ring buffer */
}
/* insert vlan info if necessary */
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
if (rte_vlan_insert(&mbuf)) {
rte_pktmbuf_free(mbuf);
continue;
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"
-#define ATL_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-#define ATL_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+#define ATL_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
+
+#define ATL_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define ATL_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
/* IPv4 csum error ? */
if (rxd_wb->rx_stat & BIT(1))
- mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else {
- mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
}
/* CSUM calculated ? */
if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
if (rxd_wb->rx_stat & BIT(2))
- mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
} else {
- mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
}
return mbuf_flags;
rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
- rx_mbuf->ol_flags |= PKT_RX_VLAN;
+ rx_mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
rx_mbuf->vlan_tci = rxd_wb.vlan;
if (cfg->vlan_strip)
rx_mbuf->ol_flags |=
- PKT_RX_VLAN_STRIPPED;
+ RTE_MBUF_F_RX_VLAN_STRIPPED;
}
if (!rx_mbuf_first)
uint32_t tx_cmd = 0;
uint64_t ol_flags = tx_pkt->ol_flags;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
txc->cmd = 0x4;
- if (ol_flags & PKT_TX_IPV6)
+ if (ol_flags & RTE_MBUF_F_TX_IPV6)
txc->cmd |= 0x2;
txc->l2_len = tx_pkt->l2_len;
txc->mss_len = tx_pkt->tso_segsz;
}
- if (ol_flags & PKT_TX_VLAN) {
+ if (ol_flags & RTE_MBUF_F_TX_VLAN) {
tx_cmd |= tx_desc_cmd_vlan;
txc->vlan_tag = tx_pkt->vlan_tci;
}
uint32_t tx_cmd)
{
txd->cmd |= tx_desc_cmd_fcs;
- txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+ txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
/* L4 csum requested */
- txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+ txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
txd->cmd |= tx_cmd;
}
src_offset = 0;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- ol_flags = PKT_RX_VLAN;
+ ol_flags = RTE_MBUF_F_RX_VLAN;
vlan_tci = pkt_buf->vlan_tci;
} else {
ol_flags = 0;
m->port = avp->port_id;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- m->ol_flags = PKT_RX_VLAN;
+ m->ol_flags = RTE_MBUF_F_RX_VLAN;
m->vlan_tci = pkt_buf->vlan_tci;
}
first_buf->nb_segs = count;
first_buf->pkt_len = total_length;
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
first_buf->vlan_tci = mbuf->vlan_tci;
}
pkt_buf->nb_segs = 1;
pkt_buf->next = NULL;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
pkt_buf->vlan_tci = m->vlan_tci;
}
}
if (rxq->pdata->rx_csum_enable) {
mbuf->ol_flags = 0;
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
- mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
} else if (
unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
- mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
if (!err || !etlt) {
if (etlt == RX_CVLAN_TAG_PRESENT) {
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
else
- mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
} else {
mbuf->ol_flags &=
- ~(PKT_RX_VLAN
- | PKT_RX_VLAN_STRIPPED);
+ ~(RTE_MBUF_F_RX_VLAN
+ | RTE_MBUF_F_RX_VLAN_STRIPPED);
mbuf->vlan_tci = 0;
}
}
/* Indicate if a Context Descriptor is next */
if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
- | PKT_RX_IEEE1588_TMST;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
+ | RTE_MBUF_F_RX_IEEE1588_TMST;
pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
PL) - rxq->crc_len;
/* Mbuf populate */
offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
if (!err || !etlt) {
if (etlt == RX_CVLAN_TAG_PRESENT) {
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
else
- mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
} else {
mbuf->ol_flags &=
- ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
mbuf->vlan_tci = 0;
}
}
first_seg->port = rxq->port_id;
if (rxq->pdata->rx_csum_enable) {
mbuf->ol_flags = 0;
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
- mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
} else if (unlikely(error_status
== AXGBE_L4_CSUM_ERR)) {
- mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
mbuf->pkt_len);
/* Timestamp enablement check */
- if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
rte_wmb();
/* Mark it as First and Last Descriptor */
/* Mark it as a NORMAL descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
/* configure h/w Offload */
- mask = mbuf->ol_flags & PKT_TX_L4_MASK;
- if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if ((mask == RTE_MBUF_F_TX_TCP_CKSUM) || (mask == RTE_MBUF_F_TX_UDP_CKSUM))
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
- else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
rte_wmb();
- if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN_PKT | RTE_MBUF_F_TX_QINQ_PKT)) {
/* Mark it as a CONTEXT descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
CTXT, 1);
{
uint64_t tmst_en = 0;
/* Timestamp enablement check */
- if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
tmst_en = TX_DESC_CTRL_FLAG_TMST;
__m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
TX_DESC_CTRL_FLAGS | mbuf->data_len
tx_start_bd->nbd = rte_cpu_to_le_16(2);
- if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m0->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
tx_start_bd->vlan_or_ethertype =
rte_cpu_to_le_16(m0->vlan_tci);
tx_start_bd->bd_flags.as_bitfield |=
*/
if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
rx_mb->vlan_tci = cqe_fp->vlan_tag;
- rx_mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ rx_mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
rx_pkts[nb_rx] = rx_mb;
mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
mbuf->data_len = mbuf->pkt_len;
mbuf->port = rxq->port_id;
- mbuf->ol_flags = PKT_RX_LRO;
+ mbuf->ol_flags = RTE_MBUF_F_RX_LRO;
bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);
if (likely(tpa_info->hash_valid)) {
mbuf->hash.rss = tpa_info->rss_hash;
- mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
} else if (tpa_info->cfa_code_valid) {
mbuf->hash.fdir.id = tpa_info->cfa_code;
- mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
if (tpa_info->vlan_valid) {
mbuf->vlan_tci = tpa_info->vlan;
- mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
if (likely(tpa_info->l4_csum_valid))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
/* recycle next mbuf */
data_cons = RING_NEXT(data_cons);
pt[i] = 0;
if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
- pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pt[i] |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
/* Tunnel case. */
if (outer_cksum_enabled) {
if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
- pt[i] |= PKT_RX_IP_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
- pt[i] |= PKT_RX_L4_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
- pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
} else {
if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
- pt[i] |= PKT_RX_IP_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
- pt[i] |= PKT_RX_L4_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
} else {
/* Non-tunnel case. */
if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
- pt[i] |= PKT_RX_IP_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
- pt[i] |= PKT_RX_L4_CKSUM_GOOD;
+ pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
}
/* Tunnel case. */
if (outer_cksum_enabled) {
if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
- pt[i] |= PKT_RX_IP_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
- pt[i] |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
- pt[i] |= PKT_RX_L4_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
- pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
} else {
if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
- pt[i] |= PKT_RX_IP_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
- pt[i] |= PKT_RX_L4_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
} else {
/* Non-tunnel case. */
if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
- pt[i] |= PKT_RX_IP_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
- pt[i] |= PKT_RX_L4_CKSUM_BAD;
+ pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
}
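The two loops above precompute ol_flags values into small tables so the vectorized RX path can translate completion-descriptor bits with a single load each. A scalar sketch of the consuming side (table and index names here are illustrative, not the driver's):

	/* flags2_idx/errors_idx assumed extracted from the completion
	 * ring entry exactly as the loop conditions above describe. */
	mbuf->ol_flags |= pt_flags2[flags2_idx] | pt_errors[errors_idx];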
if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
#ifdef RTE_LIBRTE_IEEE1588
if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
- ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
mbuf->ol_flags = ol_flags;
mbuf->hash.fdir.hi = mark_id;
*bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
mbuf->hash.fdir.id = rxcmp1->cfa_code;
- mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
return mark_id;
}
}
mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
- mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
{
if (RX_CMP_VLAN_VALID(rxcmp)) {
mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
- mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
}
t_pkt = 1;
if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
} else {
hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
if (hdr_cnt > 1)
t_pkt = 1;
if (RX_CMP_V2_L4_CS_OK(flags2))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else if (RX_CMP_V2_L4_CS_ERR(error_v2))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
if (RX_CMP_V2_L3_CS_OK(flags2))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else if (RX_CMP_V2_L3_CS_ERR(error_v2))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
}
if (t_pkt) {
if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
RX_CMP_V2_T_L4_CS_ERR(error_v2)))
- mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
else
- mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
}
}
static bool
bnxt_xmit_need_long_bd(struct rte_mbuf *tx_pkt, struct bnxt_tx_queue *txq)
{
- if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
- PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
- PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
- PKT_TX_QINQ_PKT) ||
+ if (tx_pkt->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_TCP_CKSUM |
+ RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM |
+ RTE_MBUF_F_TX_VLAN_PKT | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_TUNNEL_GRE | RTE_MBUF_F_TX_TUNNEL_VXLAN |
+ RTE_MBUF_F_TX_TUNNEL_GENEVE | RTE_MBUF_F_TX_IEEE1588_TMST |
+ RTE_MBUF_F_TX_QINQ_PKT) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
(txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
return true;
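For context, a minimal sender-side sketch of requesting one of the offloads that forces the long BD format in this helper (vlan_id is illustrative; flag names follow this patch):

	struct rte_mbuf *m = tx_pkt;		/* assumed already built */
	m->ol_flags |= RTE_MBUF_F_TX_VLAN_PKT;	/* HW VLAN insertion */
	m->vlan_tci = vlan_id;			/* TCI to insert */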
vlan_tag_flags = 0;
/* HW can accelerate only outer vlan in QinQ mode */
- if (tx_pkt->ol_flags & PKT_TX_QINQ_PKT) {
+ if (tx_pkt->ol_flags & RTE_MBUF_F_TX_QINQ_PKT) {
vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
tx_pkt->vlan_tci_outer;
outer_tpid_bd = txq->bp->outer_tpid_bd &
BNXT_OUTER_TPID_BD_MASK;
vlan_tag_flags |= outer_tpid_bd;
- } else if (tx_pkt->ol_flags & PKT_TX_VLAN_PKT) {
+ } else if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
/* shurd: Should this mask at
* TX_BD_LONG_CFA_META_VLAN_VID_MASK?
*/
else
txbd1->cfa_action = txq->bp->tx_cfa_action;
- if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
+ if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
uint16_t hdr_size;
/* TSO */
TX_BD_LONG_LFLAGS_T_IPID;
hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
tx_pkt->l4_len;
- hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_size += (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_pkt->outer_l2_len +
tx_pkt->outer_l3_len : 0;
/* The hdr_size is a multiple of 16-bit units, not 8-bit.
PKT_TX_TCP_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
- } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
- PKT_TX_TCP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ==
+ RTE_MBUF_F_TX_TCP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
- } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
- PKT_TX_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) ==
+ RTE_MBUF_F_TX_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
- } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
- PKT_TX_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ==
+ RTE_MBUF_F_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
- } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
- PKT_TX_OUTER_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ==
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
- } else if ((tx_pkt->ol_flags & PKT_TX_IEEE1588_TMST) ==
- PKT_TX_IEEE1588_TMST) {
+ } else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) ==
+ RTE_MBUF_F_TX_IEEE1588_TMST) {
/* PTP */
txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
}
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr);
-#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
- PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
- PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
- PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
- PKT_TX_IP_CKSUM)
-#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
-#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
-#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM)
-#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_IIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)
+#define PKT_TX_OIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_CKSUM (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+#define PKT_TX_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM)
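Note that these driver-local composite macros deliberately keep their PKT_TX_* names; only their expansions move into the RTE_MBUF_F_* namespace. An illustrative compile-time check (not in the patch) that still holds after the rename, by the definition just above:

	RTE_BUILD_BUG_ON(PKT_TX_TCP_UDP_CKSUM !=
			 (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM));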
#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
const uint16_t ether_type_slow_be =
rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
- return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
+ return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
(ethertype == ether_type_slow_be &&
(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
- RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
- RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
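These build-time checks pin the bit positions that the fastpath encodes directly into descriptor fields. For example, with the L4 checksum flags occupying bits 52-53, the request type falls out of a shift (scalar sketch mirroring the w1.il4type computation later in this patch; m is any mbuf):

	/* 0 = none, 1 = TCP, 2 = SCTP, 3 = UDP checksum request */
	uint8_t l4type = (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;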
* 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2
*/
if (likely(match_id)) {
- ol_flags |= PKT_RX_FDIR;
+ ol_flags |= RTE_MBUF_F_RX_FDIR;
if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
- ol_flags |= PKT_RX_FDIR_ID;
+ ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
mbuf->hash.fdir.hi = match_id - 1;
}
}
if (flag & NIX_RX_OFFLOAD_RSS_F) {
mbuf->hash.rss = tag;
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
if (rx->vtag0_gone) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = rx->vtag0_tci;
}
if (rx->vtag1_gone) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = rx->vtag1_tci;
}
}
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
if (w2 & BIT_ULL(21) /* vtag0_gone */) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
}
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
if (w2 & BIT_ULL(23) /* vtag1_gone */) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
}
f1 = vsetq_lane_u32(cq1_w0, f1, 3);
f2 = vsetq_lane_u32(cq2_w0, f2, 3);
f3 = vsetq_lane_u32(cq3_w0, f3, 3);
- ol_flags0 = PKT_RX_RSS_HASH;
- ol_flags1 = PKT_RX_RSS_HASH;
- ol_flags2 = PKT_RX_RSS_HASH;
- ol_flags3 = PKT_RX_RSS_HASH;
+ ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
} else {
ol_flags0 = 0;
ol_flags1 = 0;
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC};
- const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
- PKT_RX_IEEE1588_TMST |
+ const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
+ RTE_MBUF_F_RX_IEEE1588_TMST |
tstamp->rx_tstamp_dynflag;
const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
uint64x2_t ts01, ts23, mask;
{
uint64_t mask, ol_flags = m->ol_flags;
- if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
uint16_t *iplen, *oiplen, *oudplen;
uint16_t lso_sb, paylen;
- mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
+ mask = -!!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6));
lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
m->l2_len + m->l3_len + m->l4_len;
/* Get iplen position assuming no tunnel hdr */
iplen = (uint16_t *)(mdata + m->l2_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
oiplen = (uint16_t *)(mdata + m->outer_l2_len +
(2 << !!(ol_flags &
- PKT_TX_OUTER_IPV6)));
+ RTE_MBUF_F_TX_OUTER_IPV6)));
*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
paylen);
/* Update iplen position to inner ip hdr */
iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
m->l4_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
}
*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t ol3type =
- ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L3 */
w1.ol3type = ol3type;
w1.ol4type = csum + (csum << 1);
/* Inner L3 */
- w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2);
+ w1.il3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2);
w1.il3ptr = w1.ol4ptr + m->l2_len;
w1.il4ptr = w1.il3ptr + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.il3type = w1.il3type + !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
/* In case of no tunnel header use only
* shift IL3/IL4 fields a bit to use
((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t outer_l2_len = m->outer_l2_len;
/* Outer L3 */
w1.ol3ptr = outer_l2_len;
w1.ol4ptr = outer_l2_len + m->outer_l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L4 */
w1.ol4type = csum + (csum << 1);
w1.ol3ptr = l2_len;
w1.ol4ptr = l2_len + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.ol4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
- send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
+ send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
- send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
+ send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_QINQ);
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
}
- if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uint16_t lso_sb;
uint64_t mask;
send_hdr_ext->w0.lso = 1;
send_hdr_ext->w0.lso_mps = m->tso_segsz;
send_hdr_ext->w0.lso_format =
- NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
- shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
- shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
const uint16_t flags)
{
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
- const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+ const uint8_t is_ol_tstamp = !(ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST);
struct nix_send_ext_s *send_hdr_ext =
(struct nix_send_ext_s *)lmt_addr + 16;
uint64_t *lmt = (uint64_t *)lmt_addr;
rte_compiler_barrier();
}
- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
+ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
 * next 8 bytes as it corrupts the actual tx tstamp registered
uint16_t lso_sb;
uint64_t mask;
- if (!(ol_flags & PKT_TX_TCP_SEG))
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return;
mask = -(!w1->il3type);
w0->u |= BIT(14);
w0->lso_sb = lso_sb;
w0->lso_mps = m->tso_segsz;
- w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
- shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
- shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1->il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1->ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
- 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
- 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
- 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
- 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
- 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
};
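The "[0-15] = il4type:il3type" comment means the table is indexed by bits 52-55 of ol_flags (L4 type in bits 52-53, RTE_MBUF_F_TX_IP_CKSUM at bit 54, RTE_MBUF_F_TX_IPV4 at bit 55), exactly the encodings the RTE_BUILD_BUG_ON checks pin. A scalar recast of the lookup the NEON shuffle performs (viewing tbl as a plain 16-byte array; illustrative only):

	uint8_t idx = (m->ol_flags >> 52) & 0xf;
	uint8_t il4_il3 = ((const uint8_t *)&tbl)[idx]; /* hi nibble = L4, lo = L3 */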
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 |
- * PKT_TX_TCP_CKSUM
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x22, /* PKT_TX_IPV4 |
- * PKT_TX_SCTP_CKSUM
+ 0x22, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x32, /* PKT_TX_IPV4 |
- * PKT_TX_UDP_CKSUM
+ 0x32, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x03, /* PKT_TX_IPV4 |
- * PKT_TX_IP_CKSUM
+ 0x03, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_IP_CKSUM
*/
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
},
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
/* Tx ol_flag for vlan. */
- const uint64x2_t olv = {PKT_TX_VLAN, PKT_TX_VLAN};
+ const uint64x2_t olv = {RTE_MBUF_F_TX_VLAN,
+ RTE_MBUF_F_TX_VLAN};
/* Bit enable for VLAN1 */
const uint64x2_t mlv = {BIT_ULL(49), BIT_ULL(49)};
/* Tx ol_flag for QinQ. */
- const uint64x2_t olq = {PKT_TX_QINQ, PKT_TX_QINQ};
+ const uint64x2_t olq = {RTE_MBUF_F_TX_QINQ,
+ RTE_MBUF_F_TX_QINQ};
/* Bit enable for VLAN0 */
const uint64x2_t mlq = {BIT_ULL(48), BIT_ULL(48)};
/* Load vlan values from packet. outer is VLAN 0 */
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
/* Tx ol_flag for timestamp. */
- const uint64x2_t olf = {PKT_TX_IEEE1588_TMST,
- PKT_TX_IEEE1588_TMST};
+ const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
+ RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
const uint64x2_t alg = {BIT_ULL(59), BIT_ULL(59)};
/* Increment send mem address by 8. */
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
- RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
- RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
* 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2
*/
if (likely(match_id)) {
- ol_flags |= PKT_RX_FDIR;
+ ol_flags |= RTE_MBUF_F_RX_FDIR;
if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
- ol_flags |= PKT_RX_FDIR_ID;
+ ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
mbuf->hash.fdir.hi = match_id - 1;
}
}
if (flag & NIX_RX_OFFLOAD_RSS_F) {
mbuf->hash.rss = tag;
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
if (rx->cn9k.vtag0_gone) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = rx->cn9k.vtag0_tci;
}
if (rx->cn9k.vtag1_gone) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
}
}
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
if (w2 & BIT_ULL(21) /* vtag0_gone */) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
}
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
if (w2 & BIT_ULL(23) /* vtag1_gone */) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
}
f1 = vsetq_lane_u32(cq1_w0, f1, 3);
f2 = vsetq_lane_u32(cq2_w0, f2, 3);
f3 = vsetq_lane_u32(cq3_w0, f3, 3);
- ol_flags0 = PKT_RX_RSS_HASH;
- ol_flags1 = PKT_RX_RSS_HASH;
- ol_flags2 = PKT_RX_RSS_HASH;
- ol_flags3 = PKT_RX_RSS_HASH;
+ ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
} else {
ol_flags0 = 0;
ol_flags1 = 0;
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC};
- const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
- PKT_RX_IEEE1588_TMST |
+ const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
+ RTE_MBUF_F_RX_IEEE1588_TMST |
rxq->tstamp->rx_tstamp_dynflag;
const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
uint64x2_t ts01, ts23, mask;
{
uint64_t mask, ol_flags = m->ol_flags;
- if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
uint16_t *iplen, *oiplen, *oudplen;
uint16_t lso_sb, paylen;
- mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
+ mask = -!!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6));
lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
m->l2_len + m->l3_len + m->l4_len;
/* Get iplen position assuming no tunnel hdr */
iplen = (uint16_t *)(mdata + m->l2_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
oiplen = (uint16_t *)(mdata + m->outer_l2_len +
(2 << !!(ol_flags &
- PKT_TX_OUTER_IPV6)));
+ RTE_MBUF_F_TX_OUTER_IPV6)));
*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
paylen);
/* Update iplen position to inner ip hdr */
iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
m->l4_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
}
*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t ol3type =
- ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L3 */
w1.ol3type = ol3type;
w1.ol4type = csum + (csum << 1);
/* Inner L3 */
- w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2);
+ w1.il3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2);
w1.il3ptr = w1.ol4ptr + m->l2_len;
w1.il4ptr = w1.il3ptr + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.il3type = w1.il3type + !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
/* In case of no tunnel header use only
* shift IL3/IL4 fields a bit to use
((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t outer_l2_len = m->outer_l2_len;
/* Outer L3 */
w1.ol3ptr = outer_l2_len;
w1.ol4ptr = outer_l2_len + m->outer_l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L4 */
w1.ol4type = csum + (csum << 1);
w1.ol3ptr = l2_len;
w1.ol4ptr = l2_len + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.ol4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
- send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
+ send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
- send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
+ send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_QINQ);
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
}
- if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uint16_t lso_sb;
uint64_t mask;
send_hdr_ext->w0.lso = 1;
send_hdr_ext->w0.lso_mps = m->tso_segsz;
send_hdr_ext->w0.lso_format =
- NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
- shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
- shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
struct nix_send_mem_s *send_mem;
uint16_t off = (no_segdw - 1) << 1;
- const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+ const uint8_t is_ol_tstamp = !(ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST);
send_mem = (struct nix_send_mem_s *)(cmd + off);
if (flags & NIX_TX_MULTI_SEG_F) {
rte_compiler_barrier();
}
- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
+ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
 * next 8 bytes as it corrupts the actual tx tstamp registered
uint16_t lso_sb;
uint64_t mask;
- if (!(ol_flags & PKT_TX_TCP_SEG))
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return;
mask = -(!w1->il3type);
w0->u |= BIT(14);
w0->lso_sb = lso_sb;
w0->lso_mps = m->tso_segsz;
- w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
w1->il4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Update format for UDP tunneled packet */
w0->lso_format += is_udp_tun ? 2 : 6;
- w0->lso_format += !!(ol_flags & PKT_TX_OUTER_IPV6) << 1;
+ w0->lso_format += !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 1;
}
}
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
- 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
- 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
- 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
- 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
- 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
};
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 |
- * PKT_TX_TCP_CKSUM
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x22, /* PKT_TX_IPV4 |
- * PKT_TX_SCTP_CKSUM
+ 0x22, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x32, /* PKT_TX_IPV4 |
- * PKT_TX_UDP_CKSUM
+ 0x32, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x03, /* PKT_TX_IPV4 |
- * PKT_TX_IP_CKSUM
+ 0x03, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_IP_CKSUM
*/
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
},
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
/* Tx ol_flag for vlan. */
- const uint64x2_t olv = {PKT_TX_VLAN, PKT_TX_VLAN};
+ const uint64x2_t olv = {RTE_MBUF_F_TX_VLAN,
+ RTE_MBUF_F_TX_VLAN};
/* Bit enable for VLAN1 */
const uint64x2_t mlv = {BIT_ULL(49), BIT_ULL(49)};
/* Tx ol_flag for QinQ. */
- const uint64x2_t olq = {PKT_TX_QINQ, PKT_TX_QINQ};
+ const uint64x2_t olq = {RTE_MBUF_F_TX_QINQ,
+ RTE_MBUF_F_TX_QINQ};
/* Bit enable for VLAN0 */
const uint64x2_t mlq = {BIT_ULL(48), BIT_ULL(48)};
/* Load vlan values from packet. outer is VLAN 0 */
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
/* Tx ol_flag for timestamp. */
- const uint64x2_t olf = {PKT_TX_IEEE1588_TMST,
- PKT_TX_IEEE1588_TMST};
+ const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
+ RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
const uint64x2_t alg = {BIT_ULL(59), BIT_ULL(59)};
/* Increment send mem address by 8. */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"
#define CNXK_NIX_UDP_TUN_BITMASK \
- ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
- (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
+ ((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) | \
+ (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))
struct cnxk_fc_cfg {
enum rte_eth_fc_mode mode;
*/
*cnxk_nix_timestamp_dynfield(mbuf, tstamp) =
rte_be_to_cpu_64(*tstamp_ptr);
- /* PKT_RX_IEEE1588_TMST flag needs to be set only in case
+ /* RTE_MBUF_F_RX_IEEE1588_TMST flag needs to be set only in case
* PTP packets are received.
*/
if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
tstamp->rx_tstamp =
*cnxk_nix_timestamp_dynfield(mbuf, tstamp);
tstamp->rx_ready = 1;
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
- PKT_RX_IEEE1588_TMST |
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
+ RTE_MBUF_F_RX_IEEE1588_TMST |
tstamp->rx_tstamp_dynflag;
}
}
errlev = idx & 0xf;
errcode = (idx & 0xff0) >> 4;
- val = PKT_RX_IP_CKSUM_UNKNOWN;
- val |= PKT_RX_L4_CKSUM_UNKNOWN;
- val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+ val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+ val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;
switch (errlev) {
case NPC_ERRLEV_RE:
* including Outer L2 length mismatch error
*/
if (errcode) {
- val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LC:
if (errcode == NPC_EC_OIP4_CSUM ||
errcode == NPC_EC_IP_FRAG_OFFSET_1) {
- val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LG:
if (errcode == NPC_EC_IIP4_CSUM)
- val |= PKT_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case NPC_ERRLEV_NIX:
if (errcode == NIX_RX_PERRCODE_OL4_CHK ||
errcode == NIX_RX_PERRCODE_OL4_LEN ||
errcode == NIX_RX_PERRCODE_OL4_PORT) {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_BAD;
- val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL4_CHK ||
errcode == NIX_RX_PERRCODE_IL4_LEN ||
errcode == NIX_RX_PERRCODE_IL4_PORT) {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL3_LEN ||
errcode == NIX_RX_PERRCODE_OL3_LEN) {
- val |= PKT_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
}
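Worth noting: the three *_UNKNOWN values used to seed val above are all defined as zero, so the initialization is effectively val = 0 and the switch only ever ORs in GOOD/BAD states. A sketch of consuming such precomputed values at RX time (lookup[] is illustrative; the index layout matches the decode above):

	/* errlev sits in bits 0-3 of idx, errcode in bits 4-11 */
	mbuf->ol_flags |= lookup[(errcode << 4) | errlev];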
*/
static inline int is_eth_imm(const struct rte_mbuf *m)
{
- unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
+ unsigned int hdrlen = (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ?
sizeof(struct cpl_tx_pkt_lso_core) : 0;
hdrlen += sizeof(struct cpl_tx_pkt);
{
int csum_type;
- if (m->ol_flags & PKT_TX_IP_CKSUM) {
- switch (m->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+ switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
csum_type = TX_CSUM_TCPIP;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
csum_type = TX_CSUM_UDPIP;
break;
default:
/* fill the cpl message, same as in t4_eth_xmit, this should be kept
* similar to t4_eth_xmit
*/
- if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, mbuf) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
}
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
return 0;
}
- if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ if ((!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) &&
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
- if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+ if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
len += sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
- if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
FW_ETH_TX_PKT_WR :
FW_ETH_TX_PKT_VM_WR) |
cpl = (void *)(wr + 1);
else
cpl = (void *)(vmwr + 1);
- if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, m) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
lso = (void *)(wr + 1);
else
lso = (void *)(vmwr + 1);
- v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ v6 = (m->ol_flags & RTE_MBUF_F_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
txq->stats.tx_cso += m->tso_segsz;
}
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
if (cpl->vlan_ex)
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
else
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
if (cpl->l2info & htonl(F_RXF_IP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
- csum_ok ? PKT_RX_IP_CKSUM_GOOD :
- PKT_RX_IP_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
+ RTE_MBUF_F_RX_IP_CKSUM_BAD);
else if (cpl->l2info & htonl(F_RXF_IP6))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
- csum_ok ? PKT_RX_IP_CKSUM_GOOD :
- PKT_RX_IP_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
+ RTE_MBUF_F_RX_IP_CKSUM_BAD);
if (cpl->l2info & htonl(F_RXF_TCP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
- csum_ok ? PKT_RX_L4_CKSUM_GOOD :
- PKT_RX_L4_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
+ RTE_MBUF_F_RX_L4_CKSUM_BAD);
else if (cpl->l2info & htonl(F_RXF_UDP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
- csum_ok ? PKT_RX_L4_CKSUM_GOOD :
- PKT_RX_L4_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
+ RTE_MBUF_F_RX_L4_CKSUM_BAD);
}
/**
if (!rss_hdr->filter_tid &&
rss_hdr->hash_type) {
- pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
pkt->hash.rss =
ntohl(rss_hdr->hash_val);
}
ETH_RSS_TCP | \
ETH_RSS_SCTP)
-#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_CKSUM | \
- PKT_TX_UDP_CKSUM)
+#define DPAA_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_UDP_CKSUM)
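A minimal sketch (assumed validation pattern, not the driver's exact code) of using this mask to reject Tx offload requests outside the supported checksum set:

	if (m->ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK &
	    ~DPAA_TX_CKSUM_OFFLOAD_MASK)
		return -ENOTSUP;	/* unsupported offload requested */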
/* DPAA Frame descriptor macros */
DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
- m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD;
switch (prs) {
case DPAA_PKT_TYPE_IPV4:
break;
case DPAA_PKT_TYPE_IPV4_CSUM_ERR:
case DPAA_PKT_TYPE_IPV6_CSUM_ERR:
- m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_BAD;
+ m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR:
case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR:
case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR:
case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR:
- m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD;
break;
case DPAA_PKT_TYPE_NONE:
m->packet_type = 0;
/* Check if Vlan is present */
if (prs & DPAA_PARSE_VLAN_MASK)
- m->ol_flags |= PKT_RX_VLAN;
+ m->ol_flags |= RTE_MBUF_F_RX_VLAN;
/* Packet received without stripping the vlan */
}
m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
}
m->hash.rss = fd->simple.flc_hi;
- m->ol_flags |= PKT_RX_RSS_HASH;
+ m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (dpaa2_enable_ts[m->port]) {
*dpaa2_timestamp_dynfield(m) = annotation->word2;
#if defined(RTE_LIBRTE_IEEE1588)
if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
#endif
if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
- mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
}
}
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
L3_IP_1_MORE_FRAGMENT |
annotation->word4);
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (dpaa2_enable_ts[mbuf->port]) {
*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely(((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT) ||
- (eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_MBUF_F_TX_VLAN_PKT) ||
+ (eth_data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
goto send_n_return;
}
- if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
- (eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) ||
+ (eth_data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT)) {
+ & RTE_MBUF_F_TX_VLAN_PKT)) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
-#define E1000_TX_OFFLOAD_MASK ( \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_VLAN_PKT)
+#define E1000_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_VLAN_PKT)
#define E1000_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
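The NOTSUP mask feeds the usual tx_prepare-style validation; a sketch of the common pattern (the loop index i and the rte_errno handling are assumed, not copied from the driver):

	if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
		rte_errno = ENOTSUP;	/* unsupported offload requested */
		return i;		/* stop before the offending mbuf */
	}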
/* PCI offset for querying configuration status register */
#define PCI_CFG_STATUS_REG 0x06
* When doing checksum or TCP segmentation with IPv6 headers,
 * IPCSE field should be set to 0.
*/
- if (flags & PKT_TX_IP_CKSUM) {
+ if (flags & RTE_MBUF_F_TX_IP_CKSUM) {
ctx.lower_setup.ip_fields.ipcse =
(uint16_t)rte_cpu_to_le_16(ipcse - 1);
cmd_len |= E1000_TXD_CMD_IP;
ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
ctx.upper_setup.tcp_fields.tucse = 0;
- switch (flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct rte_udp_hdr, dgram_cksum));
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct rte_tcp_hdr, cksum));
cmd_len |= E1000_TXD_CMD_TCP;
static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
uint32_t tmp;
- tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
- tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
return tmp;
}
ol_flags = tx_pkt->ol_flags;
/* If hardware offload required */
- tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
+ tx_ol_req = (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK));
if (tx_ol_req) {
hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
hdrlen.f.l2_len = tx_pkt->l2_len;
popts_spec = 0;
/* Set VLAN Tag offload fields. */
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
return pkt_flags;
}
uint64_t pkt_flags = 0;
if (rx_error & E1000_RXD_ERR_IPE)
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (rx_error & E1000_RXD_ERR_TCPE)
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
return pkt_flags;
}
rxm->ol_flags = rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
- /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
first_seg->ol_flags = first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
- /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
#include "e1000_ethdev.h"
#ifdef RTE_LIBRTE_IEEE1588
-#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define IGB_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define IGB_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits required for building TX context */
-#define IGB_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
+#define IGB_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
IGB_TX_IEEE1588_TMST)
#define IGB_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
static inline uint64_t
check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
{
- if (!(ol_req & PKT_TX_TCP_SEG))
+ if (!(ol_req & RTE_MBUF_F_TX_TCP_SEG))
return ol_req;
if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
- ol_req &= ~PKT_TX_TCP_SEG;
- ol_req |= PKT_TX_TCP_CKSUM;
+ ol_req &= ~RTE_MBUF_F_TX_TCP_SEG;
+ ol_req |= RTE_MBUF_F_TX_TCP_CKSUM;
}
return ol_req;
}
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
tx_offload_mask.data |= TX_VLAN_CMP_MASK;
/* check if TCP segmentation required for this packet */
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* implies IP cksum in IPv4 */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
uint32_t tmp;
- tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
- tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
- tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
+ tmp |= l4_olinfo[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
return tmp;
}
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
- cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ cmdtype = vlan_cmd[(ol_flags & RTE_MBUF_F_TX_VLAN_PKT) != 0];
+ cmdtype |= tso_cmd[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
return cmdtype;
}
*/
cmd_type_len = txq->txd_type |
E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
- if (tx_ol_req & PKT_TX_TCP_SEG)
+ if (tx_ol_req & RTE_MBUF_F_TX_TCP_SEG)
pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
if (tx_ol_req) {
m = tx_pkts[i];
/* Check some limitations for TSO in hardware */
- if (m->ol_flags & PKT_TX_TCP_SEG)
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
(m->l2_len + m->l3_len + m->l4_len >
IGB_TSO_MAX_HDRLEN)) {
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
{
- uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
+ uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : RTE_MBUF_F_RX_RSS_HASH;
#if defined(RTE_LIBRTE_IEEE1588)
static uint32_t ip_pkt_etqf_map[8] = {
- 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
- pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+ pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
return pkt_flags;
}
*/
static uint64_t error_to_pkt_flags_map[4] = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
};
return error_to_pkt_flags_map[(rx_status >>
E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field and must be in CPU byte order.
*/
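The validity rule stated in this comment binds consumers of the mbuf as well; a minimal application-side sketch (hypothetical helper) that honours it:

#include <rte_mbuf.h>

/* Hypothetical helper: read the stripped VLAN TCI only when the PMD
 * marked it valid; the field is already in CPU byte order. */
static inline uint16_t
get_rx_vlan_tci(const struct rte_mbuf *m)
{
	if (m->ol_flags & RTE_MBUF_F_RX_VLAN)
		return m->vlan_tci;
	return 0; /* no VLAN tag reported for this packet */
}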
if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field and must be in CPU byte order.
*/
if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
DEV_TX_OFFLOAD_UDP_CKSUM |\
DEV_TX_OFFLOAD_IPV4_CKSUM |\
DEV_TX_OFFLOAD_TCP_TSO)
-#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
- PKT_TX_IP_CKSUM |\
- PKT_TX_TCP_SEG)
+#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
+ RTE_MBUF_F_TX_IP_CKSUM |\
+ RTE_MBUF_F_TX_TCP_SEG)
/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
#define PCI_DEVICE_ID_ENA_VF 0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21
-#define ENA_TX_OFFLOAD_MASK (\
- PKT_TX_L4_MASK | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_SEG)
+#define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define ENA_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
static const struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
packet_type |= RTE_PTYPE_L3_IPV4;
if (unlikely(ena_rx_ctx->l3_csum_err))
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
packet_type |= RTE_PTYPE_L3_IPV6;
}
if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
- ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
else
if (unlikely(ena_rx_ctx->l4_csum_err))
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (fill_hash &&
likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mbuf->hash.rss = ena_rx_ctx->hash;
}
if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
(queue_offloads & QUEUE_OFFLOADS)) {
/* check if TSO is required */
- if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+ if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
(queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
ena_tx_ctx->tso_enable = true;
}
/* check if L3 checksum is needed */
- if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+ if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
(queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
ena_tx_ctx->l3_csum_enable = true;
- if (mbuf->ol_flags & PKT_TX_IPV6) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
} else {
ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
}
/* check if L4 checksum is needed */
- if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
+ if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
(queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
- } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_UDP_CKSUM) &&
+ } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_UDP_CKSUM) &&
(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
if (unlikely(mbuf->ol_flags &
- (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
++rx_ring->rx_stats.bad_csum;
}
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (!(ol_flags & PKT_TX_IPV4))
+ if (!(ol_flags & RTE_MBUF_F_TX_IPV4))
continue;
/* If no L2 header length was specified, assume it is
}
if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
- (ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_SCTP_CKSUM) {
+ (ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_SCTP_CKSUM) {
rte_errno = ENOTSUP;
return i;
}
*/
ret = rte_net_intel_cksum_flags_prepare(m,
- ol_flags & ~PKT_TX_TCP_SEG);
+ ol_flags & ~RTE_MBUF_F_TX_TCP_SEG);
if (ret != 0) {
rte_errno = -ret;
return i;
static inline void enetc_slow_parsing(struct rte_mbuf *m,
uint64_t parse_results)
{
- m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ m->ol_flags &= ~(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
switch (parse_results) {
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4;
- m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6;
- m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_TCP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_TCP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_UDP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_UDP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_SCTP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_SCTP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_ICMP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_ICMP;
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
/* More switch cases can be added */
default:
m->packet_type = RTE_PTYPE_UNKNOWN;
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
- PKT_RX_L4_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN |
+ RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
}
}
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD;
switch (parse_results) {
case ENETC_PKT_TYPE_ETHER:
error_interrupt_offset);
/* Compute unsupported ol flags for enic_prep_pkts() */
enic->wq[index].tx_offload_notsup_mask =
- PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
+ RTE_MBUF_F_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
(enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
(enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
enic->tx_offload_mask |=
- PKT_TX_OUTER_IPV6 |
- PKT_TX_OUTER_IPV4 |
- PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_MASK;
+ RTE_MBUF_F_TX_OUTER_IPV6 |
+ RTE_MBUF_F_TX_OUTER_IPV4 |
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_TUNNEL_MASK;
enic->overlay_offload = true;
if (enic->vxlan && enic->geneve)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_RSS_HASH;
enic->tx_offload_mask =
- PKT_TX_IPV6 |
- PKT_TX_IPV4 |
- PKT_TX_VLAN |
- PKT_TX_IP_CKSUM |
- PKT_TX_L4_MASK |
- PKT_TX_TCP_SEG;
+ RTE_MBUF_F_TX_IPV6 |
+ RTE_MBUF_F_TX_IPV4 |
+ RTE_MBUF_F_TX_VLAN |
+ RTE_MBUF_F_TX_IP_CKSUM |
+ RTE_MBUF_F_TX_L4_MASK |
+ RTE_MBUF_F_TX_TCP_SEG;
return 0;
}
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
rte_errno = EINVAL;
return i;
wq_desc_avail = vnic_wq_desc_avail(wq);
head_idx = wq->head_idx;
desc_count = wq->ring.desc_count;
- ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+ ol_flags_mask = RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK;
tx_oversized = &enic->soft_stats.tx_oversized;
nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
data_len = tx_pkt->data_len;
ol_flags = tx_pkt->ol_flags;
nb_segs = tx_pkt->nb_segs;
- tso = ol_flags & PKT_TX_TCP_SEG;
+ tso = ol_flags & RTE_MBUF_F_TX_TCP_SEG;
/* drop packet if it's too big to send */
if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
mss = 0;
vlan_id = tx_pkt->vlan_tci;
- vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
+ vlan_tag_insert = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
bus_addr = (dma_addr_t)
(tx_pkt->buf_iova + tx_pkt->data_off);
offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
mss = tx_pkt->tso_segsz;
/* For tunnel, need the size of outer+inner headers */
- if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
header_len += tx_pkt->outer_l2_len +
tx_pkt->outer_l3_len;
}
}
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM;
/* Nic uses just 1 bit for UDP and TCP */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
mss |= ENIC_CALC_TCP_UDP_CKSUM;
break;
}
desc->header_length_flags &=
((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
(1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
- if (p->ol_flags & PKT_TX_VLAN) {
+ if (p->ol_flags & RTE_MBUF_F_TX_VLAN) {
desc->header_length_flags |=
1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
}
* is 0, so no need to set offload_mode.
*/
mss = 0;
- if (p->ol_flags & PKT_TX_IP_CKSUM)
+ if (p->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
- if (p->ol_flags & PKT_TX_L4_MASK)
+ if (p->ol_flags & RTE_MBUF_F_TX_L4_MASK)
mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
desc->mss_loopback = mss;
/* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
- pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
if (vlan_tci != 0) {
- pkt_flags |= PKT_RX_VLAN;
+ pkt_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
} else {
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
filter_id = clsf_cqd->filter_id;
if (filter_id) {
- pkt_flags |= PKT_RX_FDIR;
+ pkt_flags |= RTE_MBUF_F_RX_FDIR;
if (filter_id != ENIC_MAGIC_FILTER_ID) {
/* filter_id = mark id + 1, so subtract 1 */
mbuf->hash.fdir.hi = filter_id - 1;
- pkt_flags |= PKT_RX_FDIR_ID;
+ pkt_flags |= RTE_MBUF_F_RX_FDIR_ID;
}
}
} else if (enic_cq_rx_desc_rss_type(cqrd)) {
/* RSS flag */
- pkt_flags |= PKT_RX_RSS_HASH;
+ pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
}
*/
if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
}
if (l4_flags == RTE_PTYPE_L4_UDP ||
l4_flags == RTE_PTYPE_L4_TCP) {
if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
- pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
}
0x80, 0x80, 11, 10,
0x80, 0x80, 11, 10,
0x80, 0x80, 11, 10);
- /* PKT_RX_RSS_HASH is 1<<1 so fits in 8-bit integer */
+ /* RTE_MBUF_F_RX_RSS_HASH is 1<<1, so it fits in an 8-bit integer */
const __m256i rss_shuffle =
- _mm256_set_epi8(/* second 128 bits */
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0, /* rss_types = 0 */
- /* first 128 bits */
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0 /* rss_types = 0 */);
+ _mm256_set_epi8(RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, /* rss_types = 0 */
+ /* first 128 bits */
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0 /* rss_types = 0 */);
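The shuffle table above silently depends on the flag's numeric value staying below 256; a compile-time guard makes that assumption explicit (a sketch, not part of the patch, assuming the flag definitions come from rte_mbuf_core.h):

#include <assert.h>
#include <stdint.h>
#include <rte_mbuf_core.h>

/* The byte-wide shuffle can only materialize the flag if it fits in
 * a single byte; fail the build if the flag value ever changes. */
static_assert(RTE_MBUF_F_RX_RSS_HASH <= UINT8_MAX,
	      "RSS hash flag must fit in one byte for the shuffle table");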
/*
* VLAN offload flags.
* shuffle index:
*/
const __m256i vlan_shuffle =
_mm256_set_epi32(0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN);
/* Use the same shuffle index as vlan_shuffle */
const __m256i vlan_ptype_shuffle =
_mm256_set_epi32(0, 0, 0, 0,
const __m256i csum_shuffle =
_mm256_set_epi8(/* second 128 bits */
/* 1111 ip4+ip4_ok+l4+l4_ok */
- ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
/* 1110 ip4_ok+ip4+l4+!l4_ok */
- ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1),
- (PKT_RX_IP_CKSUM_GOOD >> 1), /* 1101 ip4+ip4_ok */
- (PKT_RX_IP_CKSUM_GOOD >> 1), /* 1100 ip4_ok+ip4 */
- (PKT_RX_L4_CKSUM_GOOD >> 1), /* 1011 l4+l4_ok */
- (PKT_RX_L4_CKSUM_BAD >> 1), /* 1010 l4+!l4_ok */
+ ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
+ /* 1101 ip4+ip4_ok */
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
+ /* 1100 ip4_ok+ip4 */
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
+ /* 1011 l4+l4_ok */
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1),
+ /* 1010 l4+!l4_ok */
0, /* 1001 */
0, /* 1000 */
/* 0111 !ip4_ok+ip4+l4+l4_ok */
- ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ ((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
/* 0110 !ip4_ok+ip4+l4+!l4_ok */
- ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1),
- (PKT_RX_IP_CKSUM_BAD >> 1), /* 0101 !ip4_ok+ip4 */
- (PKT_RX_IP_CKSUM_BAD >> 1), /* 0100 !ip4_ok+ip4 */
- (PKT_RX_L4_CKSUM_GOOD >> 1), /* 0011 l4+l4_ok */
- (PKT_RX_L4_CKSUM_BAD >> 1), /* 0010 l4+!l4_ok */
+ ((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1),
+ /* 0101 !ip4_ok+ip4 */
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1),
+ /* 0100 !ip4_ok+ip4 */
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
+ /* 0011 l4+l4_ok */
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1),
+ /* 0010 l4+!l4_ok */
0, /* 0001 */
0, /* 0000 */
/* first 128 bits */
- ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
- ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1),
- (PKT_RX_IP_CKSUM_GOOD >> 1),
- (PKT_RX_IP_CKSUM_GOOD >> 1),
- (PKT_RX_L4_CKSUM_GOOD >> 1),
- (PKT_RX_L4_CKSUM_BAD >> 1),
+ ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
+ ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1),
0, 0,
- ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1),
- ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1),
- (PKT_RX_IP_CKSUM_BAD >> 1),
- (PKT_RX_IP_CKSUM_BAD >> 1),
- (PKT_RX_L4_CKSUM_GOOD >> 1),
- (PKT_RX_L4_CKSUM_BAD >> 1),
+ ((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
+ ((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1),
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1),
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1),
0, 0);
/*
* Non-fragment PTYPEs.
break;
/*
- * Compute PKT_RX_RSS_HASH.
+ * Compute RTE_MBUF_F_RX_RSS_HASH.
* Use 2 shifts and 1 shuffle for 8 desc: 0.375 inst/desc
* RSS types in byte 0, 4, 8, 12, 16, 20, 24, 28
* Everything else is zero.
__m256i rss_types =
_mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 10), 28);
/*
- * RSS flags (PKT_RX_RSS_HASH) are in
+ * RSS flags (RTE_MBUF_F_RX_RSS_HASH) are in
* byte 0, 4, 8, 12, 16, 20, 24, 28
* Everything else is zero.
*/
vlan0_7 = _mm256_sub_epi32(zero4, vlan0_7);
/*
- * Compute PKT_RX_VLAN and PKT_RX_VLAN_STRIPPED.
+ * Compute RTE_MBUF_F_RX_VLAN and RTE_MBUF_F_RX_VLAN_STRIPPED.
* Use 3 shifts, 1 or, 1 shuffle for 8 desc: 0.625 inst/desc
* VLAN offload flags in byte 0, 4, 8, 12, 16, 20, 24, 28
* Everything else is zero.
}
#endif
-#define FM10K_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+#define FM10K_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
/* @note: When this function is changed, make corresponding change to
* fm10k_dev_supported_ptypes_get()
>> FM10K_RXD_PKTTYPE_SHIFT];
if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
- m->ol_flags |= PKT_RX_RSS_HASH;
+ m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (unlikely((d->d.staterr &
(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
- m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely((d->d.staterr &
(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
- m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
uint16_t
* Packets in the fm10k device always carry at least one VLAN tag.
* For those packets coming in without VLAN tag,
* the port default VLAN tag will be used.
- * So, always PKT_RX_VLAN flag is set and vlan_tci
+ * So, the RTE_MBUF_F_RX_VLAN flag is always set and vlan_tci
* is valid for each RX packet's mbuf.
*/
- mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
* Packets in the fm10k device always carry at least one VLAN tag.
* For those packets coming in without VLAN tag,
* the port default VLAN tag will be used.
- * So, always PKT_RX_VLAN flag is set and vlan_tci
+ * So, the RTE_MBUF_F_RX_VLAN flag is always set and vlan_tci
* is valid for each RX packet's mbuf.
*/
- first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
first_seg->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
/* set checksum flags on first descriptor of packet. SCTP checksum
* offload is not supported, but we do not explicitly check for this
* case in favor of greatly simplified processing. */
- if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
+ if (mb->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG))
q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
/* set vlan if requested */
- if (mb->ol_flags & PKT_TX_VLAN_PKT)
+ if (mb->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
q->hw_ring[q->next_free].vlan = mb->vlan_tci;
else
q->hw_ring[q->next_free].vlan = 0;
q->hw_ring[q->next_free].buflen =
rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
- if (mb->ol_flags & PKT_TX_TCP_SEG) {
+ if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
- hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdrlen += (mb->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
mb->outer_l2_len + mb->outer_l3_len : 0;
if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
hdrlen += sizeof(struct fm10k_ftag);
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
- if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+ if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
(m->tso_segsz < FM10K_TSO_MINMSS)) {
rte_errno = EINVAL;
return i;
#define RXEFLAG_SHIFT (13)
/* IPE/L4E flag shift */
#define L3L4EFLAG_SHIFT (14)
-/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */
+/* shift RTE_MBUF_F_RX_L4_CKSUM_GOOD into one byte by 1 bit */
#define CKSUM_SHIFT (1)
static inline void
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
- (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
- 0, 0, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+ 0, 0, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
/* Calculate RSS_hash and Vlan fields */
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
rss_type = HINIC_GET_RSS_TYPES(offload_type);
if (likely(rss_type != 0)) {
*rss_hash = cqe_hass_val;
- return PKT_RX_RSS_HASH;
+ return RTE_MBUF_F_RX_RSS_HASH;
}
return 0;
struct hinic_nic_dev *nic_dev = rxq->nic_dev;
if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
- return PKT_RX_IP_CKSUM_UNKNOWN;
+ return RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
/* in most cases the checksum is ok */
checksum_err = HINIC_GET_RX_CSUM_ERR(status);
if (likely(checksum_err == 0))
- return (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
/* If the BYPASS bit is set, all other status indications should be ignored */
if (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err)))
- return PKT_RX_IP_CKSUM_UNKNOWN;
+ return RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
flags = 0;
/* IP checksum error */
if (HINIC_CSUM_ERR_IP(checksum_err))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
/* L4 checksum error */
if (HINIC_CSUM_ERR_L4(checksum_err))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(HINIC_CSUM_ERR_OTHER(checksum_err)))
- flags = PKT_RX_L4_CKSUM_NONE;
+ flags = RTE_MBUF_F_RX_L4_CKSUM_NONE;
rxq->rxq_stats.errors++;
*vlan_tci = vlan_tag;
- return PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ return RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq,
/* lro offload */
lro_num = HINIC_GET_RX_NUM_LRO(cqe.status);
if (unlikely(lro_num != 0)) {
- rxm->ol_flags |= PKT_RX_LRO;
+ rxm->ol_flags |= RTE_MBUF_F_RX_LRO;
rxm->tso_segsz = pkt_len / lro_num;
}
task->pkt_info2 = 0;
/* Base VLAN */
- if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(ol_flags & RTE_MBUF_F_TX_VLAN_PKT)) {
vlan_tag = mbuf->vlan_tci;
hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
vlan_tag >> VLAN_PRIO_SHIFT);
if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)))
return;
- if ((ol_flags & PKT_TX_TCP_SEG))
+ if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG))
/* set tso info for task and qsf */
hinic_set_tso_info(task, queue_info, mbuf, tx_off_info);
else /* just support l4 checksum offload */
psd_hdr.dst_addr = ipv4_hdr->dst_addr;
psd_hdr.zero = 0;
psd_hdr.proto = ipv4_hdr->next_proto_id;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
psd_hdr.len =
} psd_hdr;
psd_hdr.proto = (ipv6_hdr->proto << 24);
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
psd_hdr.len = 0;
else
psd_hdr.len = ipv6_hdr->payload_len;
{
uint64_t ol_flags = m->ol_flags;
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
m->l2_len + m->l3_len;
- else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
+ else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
m->l2_len + m->l3_len + m->l4_len;
}
{
uint64_t ol_flags = m->ol_flags;
- if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) ||
- ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM))
+ if (((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) ||
+ ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_SCTP_CKSUM))
off_info->payload_offset = m->l2_len + m->l3_len;
- else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
+ else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
off_info->payload_offset = m->l2_len + m->l3_len +
m->l4_len;
}
uint8_t l3_type;
uint64_t ol_flags = mbuf->ol_flags;
- if (ol_flags & PKT_TX_IPV4)
- l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
+ l3_type = (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
IPV4_PKT_WITH_CHKSUM_OFFLOAD :
IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ol_flags & PKT_TX_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_IPV6)
l3_type = IPV6_PKT;
else
l3_type = UNKNOWN_L3TYPE;
struct rte_tcp_hdr *tcp_hdr;
uint64_t ol_flags = mbuf->ol_flags;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
inner_l3_offset);
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
struct rte_udp_hdr *udp_hdr;
uint64_t ol_flags = mbuf->ol_flags;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
inner_l3_offset);
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
{
uint64_t ol_flags = mbuf->ol_flags;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
hinic_calculate_sctp_checksum(off_info);
break;
default:
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
hinic_calculate_tcp_checksum(mbuf, off_info,
inner_l3_offset);
break;
return 0;
/* Support only vxlan offload */
- if (unlikely((ol_flags & PKT_TX_TUNNEL_MASK) &&
- !(ol_flags & PKT_TX_TUNNEL_VXLAN)))
+ if (unlikely((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+ !(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)))
return -ENOTSUP;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
return -EINVAL;
#endif
- if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) {
off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
/* inner_l4_tcp_udp csum should be set to calculate outer
*/
off_info->inner_l4_tcp_udp = 1;
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_IPV6) ||
- (ol_flags & PKT_TX_TCP_SEG)) {
+ if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) ||
+ (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
inner_l3_offset = m->l2_len + m->outer_l2_len +
m->outer_l3_len;
off_info->outer_l2_len = m->outer_l2_len;
sqe_info->cpy_mbuf_cnt = 0;
/* non tso mbuf */
- if (likely(!(mbuf_pkt->ol_flags & PKT_TX_TCP_SEG))) {
+ if (likely(!(mbuf_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG))) {
if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) {
/* non-TSO packet length must be less than 64KB */
return false;
#define HINIC_GET_WQ_TAIL(txq) \
((txq)->wq->queue_buf_vaddr + (txq)->wq->wq_buf_size)
-#define HINIC_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_CKSUM | \
- PKT_TX_UDP_CKSUM | \
- PKT_TX_SCTP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG)
+#define HINIC_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_SCTP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG)
enum sq_wqe_type {
SQ_NORMAL_WQE = 0,
* - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
* In this mode, the HW cannot do checksum for special UDP ports like
* 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
- * packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
+ * packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the mbuf. So, the PMD needs to
* do the checksum for these packets to avoid a checksum error.
*
* - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
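For the SW mode described above, the fallback amounts to computing the checksum with the generic helpers and clearing the L4 request bits; a sketch of the IPv4 case (hypothetical helper, assuming a contiguous single-segment header; the driver itself uses hns3_udp_cksum_help()):

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_udp.h>

static void
sw_udp_cksum_ipv4(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
			struct rte_ipv4_hdr *, m->l2_len);
	struct rte_udp_hdr *udp = rte_pktmbuf_mtod_offset(m,
			struct rte_udp_hdr *, m->l2_len + m->l3_len);

	udp->dgram_cksum = 0;
	udp->dgram_cksum = rte_ipv4_udptcp_cksum(ip, udp);
	/* Clear the request bits so the HW does not redo the checksum. */
	m->ol_flags &= ~RTE_MBUF_F_TX_L4_MASK;
}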
mb->vlan_tci = 0;
return;
case HNS3_INNER_STRP_VLAN_VLD:
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
return;
case HNS3_OUTER_STRP_VLAN_VLD:
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
return;
default:
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
if (hns3_timestamp_rx_dynflag > 0) {
*RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
rte_mbuf_timestamp_t *) = timestamp;
rxm->data_len = rxm->pkt_len;
rxm->port = rxq->port_id;
rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- rxm->ol_flags |= PKT_RX_RSS_HASH;
+ rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
rxm->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
- rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
- rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
first_seg->port = rxq->port_id;
first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- first_seg->ol_flags = PKT_RX_RSS_HASH;
+ first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
first_seg->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
- first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
if (gro_size != 0) {
- first_seg->ol_flags |= PKT_RX_LRO;
+ first_seg->ol_flags |= RTE_MBUF_F_RX_LRO;
first_seg->tso_segsz = gro_size;
}
l234_info, ol_info);
if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
- rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
static inline bool
hns3_pkt_is_tso(struct rte_mbuf *m)
{
- return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+ return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
}
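This check encodes the mbuf TSO contract: the flag alone is not enough, tso_segsz and the header lengths must be filled in too. A TX-side sketch of a well-formed request (hypothetical helper, plain IPv4/TCP case):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
	/* Both the flag and the segment size must be set, plus the
	 * header lengths the PMD uses to locate the TCP payload. */
	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM;
	m->tso_segsz = mss;
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
}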
static void
uint32_t paylen;
hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
- hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
* To avoid the VLAN of the Tx descriptor being overwritten by PVID, it should
* be added to the position close to the IP header when PVID is enabled.
*/
- if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
- PKT_TX_QINQ_PKT)) {
+ if (!txq->pvid_sw_shift_en && ol_flags & (RTE_MBUF_F_TX_VLAN_PKT |
+ RTE_MBUF_F_TX_QINQ_PKT)) {
desc->tx.ol_type_vlan_len_msec |=
rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
- if (ol_flags & PKT_TX_QINQ_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_QINQ_PKT)
desc->tx.outer_vlan_tag =
rte_cpu_to_le_16(rxm->vlan_tci_outer);
else
rte_cpu_to_le_16(rxm->vlan_tci);
}
- if (ol_flags & PKT_TX_QINQ_PKT ||
- ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
+ if (ol_flags & RTE_MBUF_F_TX_QINQ_PKT ||
+ ((ol_flags & RTE_MBUF_F_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
desc->tx.type_cs_vlan_tso_len |=
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
desc->tx.tp_fe_sc_vld_ra_ri |=
rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
}
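The TX-side counterpart these branches expect from the application is small; a hypothetical snippet requesting VLAN insertion:

#include <rte_mbuf.h>

/* Hypothetical TX VLAN insertion request: the PMD copies vlan_tci
 * into the descriptor only when the flag is set. */
static void
request_vlan_insert(struct rte_mbuf *m, uint16_t tci)
{
	m->ol_flags |= RTE_MBUF_F_TX_VLAN_PKT;
	m->vlan_tci = tci;
}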
uint64_t ol_flags = m->ol_flags;
/* (outer) IP header type */
- if (ol_flags & PKT_TX_OUTER_IPV4) {
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
else
tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
- } else if (ol_flags & PKT_TX_OUTER_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
HNS3_OL3T_IPV6);
}
uint64_t ol_flags = m->ol_flags;
uint16_t inner_l2_len;
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_VXLAN_GPE:
- case PKT_TX_TUNNEL_GENEVE:
- case PKT_TX_TUNNEL_VXLAN:
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
/* MAC in UDP tunnelling packet, including VxLAN and GENEVE */
tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
/*
* calculations, the length of the L2 header, which includes the outer and
* inner parts, will be filled in during the parsing of tunnel packets.
*/
- if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
+ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
/*
* For non tunnel type the tunnel type id is 0, so no need to
* assign a value to it. Only the inner(normal) L2 header length
* calculate the header length.
*/
if (unlikely(!(ol_flags &
- (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
+ (RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
m->outer_l2_len == 0)) {
struct rte_net_hdr_lens hdr_len;
(void)rte_net_get_ptype(m, &hdr_len,
desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
- tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
+ tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ?
BIT(HNS3_TXD_OL4CS_B) : 0;
desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
uint32_t tmp;
tmp = *type_cs_vlan_tso_len;
- if (ol_flags & PKT_TX_IPV4)
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
l3_type = HNS3_L3T_IPV4;
- else if (ol_flags & PKT_TX_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_IPV6)
l3_type = HNS3_L3T_IPV6;
else
l3_type = HNS3_L3T_NONE;
tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
tmp |= BIT(HNS3_TXD_L3CS_B);
*type_cs_vlan_tso_len = tmp;
}
uint64_t ol_flags = m->ol_flags;
uint32_t tmp;
/* Enable L4 checksum offloads */
- switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
- case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
- case PKT_TX_TCP_CKSUM:
- case PKT_TX_TCP_SEG:
+ switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) {
+ case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_SEG:
tmp = *type_cs_vlan_tso_len;
tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
HNS3_L4T_TCP);
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
tmp = *type_cs_vlan_tso_len;
tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
HNS3_L4T_UDP);
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
tmp = *type_cs_vlan_tso_len;
tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
HNS3_L4T_SCTP);
/* ensure the first 8 frags is greater than mss + header */
hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
- hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
return true;
struct rte_ipv4_hdr *ipv4_hdr;
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->outer_l2_len);
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
- if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
* If OUTER_UDP_CKSUM is supported, the HW can calculate the pseudo
* header for TSO packets
*/
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
return true;
udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
m->outer_l2_len + m->outer_l3_len);
struct rte_ipv6_hdr *ipv6_hdr;
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->outer_l2_len);
- if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
* If OUTER_UDP_CKSUM is supported, the HW can calculate the pseudo
* header for TSO packets
*/
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
return true;
udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
m->outer_l2_len + m->outer_l3_len);
uint32_t paylen, hdr_len, l4_proto;
struct rte_udp_hdr *udp_hdr;
- if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+ if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
return;
- if (ol_flags & PKT_TX_OUTER_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
return;
} else {
}
/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
- if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
hdr_len = m->l2_len + m->l3_len + m->l4_len;
hdr_len += m->outer_l2_len + m->outer_l3_len;
paylen = m->pkt_len - hdr_len;
return -EINVAL;
hdr_len = m->l2_len + m->l3_len + m->l4_len;
- hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
m->outer_l2_len + m->outer_l3_len : 0;
if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
return -EINVAL;
* implementation function named hns3_prep_pkts to inform users that
* these packets will be discarded.
*/
- if (m->ol_flags & PKT_TX_QINQ_PKT)
+ if (m->ol_flags & RTE_MBUF_F_TX_QINQ_PKT)
return -EINVAL;
eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
- if (m->ol_flags & PKT_TX_VLAN_PKT)
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
return -EINVAL;
/* Ensure the incoming packet is not a QinQ packet */
uint16_t cksum = 0;
uint32_t l4_len;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv4_hdr *, m->l2_len);
l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
uint16_t dst_port;
if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
- ol_flags & PKT_TX_TUNNEL_MASK ||
- (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
+ ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK ||
+ (ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM)
return true;
/*
* A UDP packet with the same dst_port as VXLAN\VXLAN_GPE\GENEVE will
case RTE_VXLAN_GPE_DEFAULT_PORT:
case RTE_GENEVE_DEFAULT_PORT:
udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
- m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
+ m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK;
return false;
default:
return true;
* - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
* In this mode, the HW cannot do checksum for special UDP ports like
* 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
- * packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
+ * packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the mbuf. So, the PMD needs to
* do the checksum for these packets to avoid a checksum error.
*
* - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
unsigned int socket_id;
};
-#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_OUTER_UDP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_L4_MASK)
+#define HNS3_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_L4_MASK)
enum hns3_cksum_status {
HNS3_CKSUM_NONE = 0,
BIT(HNS3_RXD_OL4E_B))
if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
- rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ rxm->ol_flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
rxq->dfx_stats.l3_csum_errors++;
} else {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
rxq->dfx_stats.l4_csum_errors++;
} else {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
rxq->dfx_stats.ol3_csum_errors++;
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
- rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
rxq->dfx_stats.ol4_csum_errors++;
}
}
pkt = sw_ring[i].mbuf;
/* init rte_mbuf.rearm_data last 64-bit */
- pkt->ol_flags = PKT_RX_RSS_HASH;
+ pkt->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
l234_info = rxdp[i].rx.l234_info;
ol_info = rxdp[i].rx.ol_info;
for (i = 0; i < (int)bd_vld_num; i++) {
/* init rte_mbuf.rearm_data last 64-bit */
- rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;
+ rx_pkts[i]->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
key->l234_info[i]);
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#ifdef RTE_LIBRTE_IEEE1588
-#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define I40E_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define I40E_TX_IEEE1588_TMST 0
#endif
-#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
-
-#define I40E_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_MASK | \
+#define I40E_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
+
+#define I40E_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_QINQ_PKT | \
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_TUNNEL_MASK | \
I40E_TX_IEEE1588_TMST)
#define I40E_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
-#define I40E_TX_OFFLOAD_SIMPLE_SUP_MASK ( \
- PKT_TX_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_OUTER_IPV6)
+#define I40E_TX_OFFLOAD_SIMPLE_SUP_MASK (RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV6)
#define I40E_TX_OFFLOAD_SIMPLE_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_SIMPLE_SUP_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_SIMPLE_SUP_MASK)
static int
i40e_monitor_callback(const uint64_t value,
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
- PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
+ RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
/* Check if RSS_HASH */
flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
- I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
/* Check if FDIR Match */
flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
- PKT_RX_FDIR : 0);
+ RTE_MBUF_F_RX_FDIR : 0);
return flags;
}
#define I40E_RX_ERR_BITS 0x3f
if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
- flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
- flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
return flags;
}
if ((mb->packet_type & RTE_PTYPE_L2_MASK)
== RTE_PTYPE_L2_ETHER_TIMESYNC)
- pkt_flags = PKT_RX_IEEE1588_PTP;
+ pkt_flags = RTE_MBUF_F_RX_IEEE1588_PTP;
if (tsyn & 0x04) {
- pkt_flags |= PKT_RX_IEEE1588_TMST;
+ pkt_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
mb->timesync = tsyn & 0x03;
}
if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
+ flags |= RTE_MBUF_F_RX_FDIR_ID;
} else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
- flags |= PKT_RX_FDIR_FLX;
+ flags |= RTE_MBUF_F_RX_FDIR_FLX;
}
if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
mb->hash.fdir.lo =
rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
- flags |= PKT_RX_FDIR_FLX;
+ flags |= RTE_MBUF_F_RX_FDIR_FLX;
}
#else
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
+ flags |= RTE_MBUF_F_RX_FDIR_ID;
#endif
return flags;
}
uint32_t *cd_tunneling)
{
/* EIPT: External (outer) IP header type */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
/* EIPLEN: External (outer) IP header length, in DWords */
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
/* L4TUNT: L4 Tunneling Type */
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_IPIP:
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 00b */
break;
- case PKT_TX_TUNNEL_VXLAN:
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
*cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
union i40e_tx_offload tx_offload)
{
/* Set MACLEN */
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
else
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
}
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2)
<< I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(
rxdp[j].wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
#ifdef RTE_LIBRTE_IEEE1588
rxm->packet_type =
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
#ifdef RTE_LIBRTE_IEEE1588
first_seg->packet_type =
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
#ifdef RTE_LIBRTE_IEEE1588
static inline uint16_t
i40e_calc_context_desc(uint64_t flags)
{
- static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT |
- PKT_TX_TUNNEL_MASK;
+ static uint64_t mask = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_QINQ_PKT |
+ RTE_MBUF_F_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
- mask |= PKT_TX_IEEE1588_TMST;
+ mask |= RTE_MBUF_F_TX_IEEE1588_TMST;
#endif
return (flags & mask) ? 1 : 0;
}
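/*
 * [Editor's note] Hedged usage sketch: the helper above returns a
 * descriptor count (0 or 1), not a boolean, so the TX burst path can add
 * it straight into the per-packet descriptor budget, as the hunk below
 * also shows. tx_pkt and ol_flags are the surrounding driver locals;
 * nb_needed is a hypothetical name for the resulting budget.
 */
uint16_t nb_ctx = i40e_calc_context_desc(ol_flags);
uint16_t nb_needed = (uint16_t)(tx_pkt->nb_segs + nb_ctx);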
hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
- hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
cd_cmd = I40E_TX_CTX_DESC_TSO;
* the mbuf data size exceeds max data size that hw allows
* per tx desc.
*/
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
nb_ctx);
else
}
/* Descriptor based VLAN insertion */
- if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN_PKT | RTE_MBUF_F_TX_QINQ_PKT)) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
/* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
i40e_parse_tunneling_params(ol_flags, tx_offload,
&cd_tunneling_params);
/* Enable checksum offloading */
}
/* TSO enabled means no timestamp */
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
i40e_set_tso_ctx(tx_pkt, tx_offload);
else {
#ifdef RTE_LIBRTE_IEEE1588
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT);
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
- if (ol_flags & PKT_TX_QINQ_PKT) {
+ if (ol_flags & RTE_MBUF_F_TX_QINQ_PKT) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
- while ((ol_flags & PKT_TX_TCP_SEG) &&
+ while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
txd->buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
ol_flags = m->ol_flags;
/* Check for m->nb_segs to not exceed the limits. */
- if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = EINVAL;
/* map rss and vlan type to rss hash and vlan flag */
const vector unsigned char vlan_flags = (vector unsigned char){
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
const vector unsigned char rss_flags = (vector unsigned char){
- 0, PKT_RX_FDIR, 0, 0,
- 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, RTE_MBUF_F_RX_FDIR, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
0, 0, 0, 0,
0, 0, 0, 0};
const vector unsigned char l3_l4e_flags = (vector unsigned char){
0,
- PKT_RX_IP_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
- | PKT_RX_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
+ | RTE_MBUF_F_RX_IP_CKSUM_BAD,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
* - Position that bit correctly based on packet number
* - OR in the resulting bit to mbuf_flags
*/
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
0, 0, 0, 1 << 13);
__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);
* destination
*/
const __m256i vlan_flags_shuf = _mm256_set_epi32(
- 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
- 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
+ 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
/*
* data to be shuffled by result of flag mask, shifted down 11.
* If RSS/FDIR bits are set, shuffle moves appropriate flags in
*/
const __m256i rss_flags_shuf = _mm256_set_epi8(
0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0);
/*
* data to be shuffled by the result of the flags mask shifted by 22
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
-
- const __m256i cksum_mask = _mm256_set1_epi32(
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
+
+ const __m256i cksum_mask = _mm256_set1_epi32
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
* order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
* Then OR FDIR flags to mbuf_flags on FDIR ID hit.
*/
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
const __m256i fdir_mask = _mm256_cmpeq_epi32(fdir, fdir_id);
__m256i fdir_bits = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
* - Position that bit correctly based on packet number
* - OR in the resulting bit to mbuf_flags
*/
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
0, 0, 0, 1 << 13);
__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);
* destination
*/
const __m256i vlan_flags_shuf = _mm256_set_epi32
- (0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
- 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ (0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
+ 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
/* data to be shuffled by result of flag mask, shifted down 11.
* If RSS/FDIR bits are set, shuffle moves appropriate flags in
*/
const __m256i rss_flags_shuf = _mm256_set_epi8
(0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0);
/* data to be shuffled by the result of the flags mask shifted by 22
* bits. This gives us the l3_l4 flags.
const __m256i l3_l4_flags_shuf = _mm256_set_epi8
(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
const __m256i cksum_mask = _mm256_set1_epi32
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
uint16_t i, received;
* order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
* Then OR FDIR flags to mbuf_flags on FDIR ID hit.
*/
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
const __m256i fdir_mask =
_mm256_cmpeq_epi32(fdir, fdir_id);
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
const uint32x4_t cksum_mask = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD};
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD};
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
+ 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
const uint8x16_t rss_flags = {
- 0, PKT_RX_FDIR, 0, 0,
- 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, RTE_MBUF_F_RX_FDIR, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
0, 0, 0, 0,
0, 0, 0, 0};
const uint8x16_t l3_l4e_flags = {
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
* correct location in the mbuf->ol_flags
*/
const uint32_t FDIR_ID_BIT_SHIFT = 13;
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
v_fd_id_mask = _mm_srli_epi32(v_fd_id_mask, 31);
v_fd_id_mask = _mm_slli_epi32(v_fd_id_mask, FDIR_ID_BIT_SHIFT);
__m128i v_desc0_mask = _mm_and_si128(v_desc_fdir_mask, v_desc0_shift);
descs[0] = _mm_blendv_epi8(descs[0], _mm_setzero_si128(), v_desc0_mask);
- /* Shift to 1 or 0 bit per u32 lane, then to PKT_RX_FDIR_ID offset */
+ /* Shift to 1 or 0 bit per u32 lane, then to RTE_MBUF_F_RX_FDIR_ID offset */
const uint32_t FDIR_ID_BIT_SHIFT = 13;
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
__m128i v_mask_one_bit = _mm_srli_epi32(v_fdir_id_mask, 31);
return _mm_slli_epi32(v_mask_one_bit, FDIR_ID_BIT_SHIFT);
}
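/*
 * [Editor's note] A scalar sketch of what the two vector shifts above do
 * for each 32-bit lane, assuming the lane holds 0xFFFFFFFF on an FDIR ID
 * match and 0 otherwise; fdir_id_bit_scalar() is a hypothetical name.
 */
static inline uint32_t
fdir_id_bit_scalar(uint32_t fdir_id_match_mask)
{
	uint32_t bit = fdir_id_match_mask >> 31;	/* 0xFFFFFFFF -> 1, 0 -> 0 */

	return bit << 13;	/* RTE_MBUF_F_RX_FDIR_ID == (1 << 13), see above */
}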
const __m128i rss_vlan_msk = _mm_set_epi32(
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
- const __m128i cksum_mask = _mm_set_epi32(
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ const __m128i cksum_mask = _mm_set_epi32
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0);
const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
/* Unpack "status" from quadword 1, bits 0:32 */
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
#endif
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#endif
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
} else {
#endif
if (vlan_tci) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = vlan_tci;
}
}
/* Check if RSS_HASH */
flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
- IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
/* Check if FDIR Match */
flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
- PKT_RX_FDIR : 0);
+ RTE_MBUF_F_RX_FDIR : 0);
if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
- flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
/* TODO: Oversize error bit is not processed here */
if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
+ flags |= RTE_MBUF_F_RX_FDIR_ID;
}
#else
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
+ flags |= RTE_MBUF_F_RX_FDIR_ID;
#endif
return flags;
}
return 0;
if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
- flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
- flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
return flags;
}
ptype_tbl[(uint8_t)((qword1 &
IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
rxm->ol_flags |= pkt_flags;
ptype_tbl[(uint8_t)((qword1 &
IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
first_seg->ol_flags |= pkt_flags;
IAVF_RXD_QW1_PTYPE_MASK) >>
IAVF_RXD_QW1_PTYPE_SHIFT)];
- if (pkt_flags & PKT_RX_RSS_HASH)
+ if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(
rxdp[j].wb.qword0.hi_dword.rss);
- if (pkt_flags & PKT_RX_FDIR)
+ if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
mb->ol_flags |= pkt_flags;
static inline uint16_t
iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
{
- if (flags & PKT_TX_TCP_SEG)
+ if (flags & RTE_MBUF_F_TX_TCP_SEG)
return 1;
- if (flags & PKT_TX_VLAN_PKT &&
+ if (flags & RTE_MBUF_F_TX_VLAN_PKT &&
vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
return 1;
return 0;
IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
}
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
/* Descriptor based VLAN insertion */
- if (ol_flags & PKT_TX_VLAN_PKT &&
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
/* TSO enabled */
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
iavf_set_tso_ctx(tx_pkt, tx_offload);
- if (ol_flags & PKT_TX_VLAN_PKT &&
- txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT &&
+ txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
cd_l2tag2 = tx_pkt->vlan_tci;
ol_flags = m->ol_flags;
/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
- if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
rte_errno = EINVAL;
return i;
}
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
- ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
+ ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
ret = iavf_check_vlan_up2tc(txq, m);
if (ret != 0) {
rte_errno = -ret;
#define IAVF_TSO_MAX_SEG UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG 8
-#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-#define IAVF_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
+
+#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
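/*
 * [Editor's note] Illustrative sketch of how a tx_prepare-style check
 * typically uses the mask above: any offload bit requested outside
 * IAVF_TX_OFFLOAD_MASK causes the packet to be rejected up front. Here
 * `m` and `i` are assumed to be the current mbuf and its burst index.
 */
if (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
	rte_errno = ENOTSUP;
	return i; /* stop before the first unsupported packet */
}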
/**
* Rx Flex Descriptors
* destination
*/
const __m256i vlan_flags_shuf =
- _mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
- 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ _mm256_set_epi32(0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ 0,
+ 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ 0);
/**
* data to be shuffled by result of flag mask, shifted down 11.
* If RSS/FDIR bits are set, shuffle moves appropriate flags in
*/
const __m256i rss_flags_shuf =
_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
- 0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, 0, 0, 0, RTE_MBUF_F_RX_FDIR, 0,
+ /* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
- 0, 0, 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, 0, 0, 0, RTE_MBUF_F_RX_FDIR, 0);
/**
* data to be shuffled by the result of the flags mask shifted by 22
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
- RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
- const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/**
* data to be shuffled by result of flag mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
const __m256i rss_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
const __m256i vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0);
uint16_t i, received;
0, 0, 0, 0,
0, 0, 0, 0,
0, 0,
- PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN |
+ RTE_MBUF_F_RX_VLAN_STRIPPED,
0);
vlan_flags =
* destination
*/
const __m256i vlan_flags_shuf =
- _mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
- 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ _mm256_set_epi32(0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ 0,
+ 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ 0);
#endif
#ifdef IAVF_RX_RSS_OFFLOAD
*/
const __m256i rss_flags_shuf =
_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
- 0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, 0, 0, 0,
+ RTE_MBUF_F_RX_FDIR, 0,
+ /* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
- 0, 0, 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, 0, 0, 0,
+ RTE_MBUF_F_RX_FDIR, 0);
#endif
#ifdef IAVF_RX_CSUM_OFFLOAD
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
#endif
#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
- RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
- const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
#endif
#if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
/**
(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
const __m256i vlan_flags_shuf = _mm256_set_epi8
(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0);
#endif
0, 0, 0, 0,
0, 0, 0, 0,
0, 0,
- PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN |
+ RTE_MBUF_F_RX_VLAN_STRIPPED,
0);
vlan_flags =
IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
td_offset |= (tx_pkt->l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
td_offset |= (tx_pkt->l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
td_offset |= (tx_pkt->l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
#endif
#ifdef IAVF_TX_VLAN_QINQ_OFFLOAD
- if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
IAVF_TXD_QW1_L2TAG1_SHIFT);
const __m128i rss_vlan_msk = _mm_set_epi32(
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
- const __m128i cksum_mask = _mm_set_epi32(
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ const __m128i cksum_mask = _mm_set_epi32
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0);
const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
- RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
- const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m128i pkt_fdir_bit = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
__m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
0x3070, 0x3070);
- const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ const __m128i cksum_mask = _mm_set_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
/* merge 4 descriptors */
flags = _mm_unpackhi_epi32(descs[0], descs[1]);
#include "ice_rxtx.h"
#include "ice_rxtx_vec_common.h"
-#define ICE_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
+#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM)
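For context, a hedged sketch of how a per-driver checksum mask like this is typically consumed on the transmit path (the helper name is illustrative, not part of the patch):

	#include <stdbool.h>

	/* illustrative helper: does this mbuf request any checksum/TSO
	 * work that needs TX offload fields or a context descriptor?
	 */
	static inline bool
	tx_needs_cksum_offload(const struct rte_mbuf *m)
	{
		return (m->ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) != 0;
	}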
/* Offset of mbuf dynamic field for protocol extraction data */
int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
#endif
#endif
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#endif
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
return 0;
if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
- flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
- flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
- flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
else
- flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
return flags;
}
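On the application side, the renamed RX status flags are read the same way as before; a small sketch (the software fallback routine is hypothetical):

	/* sketch: trust the HW verdict when one is given, fall back to a
	 * software check only when the HW reports UNKNOWN
	 */
	if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
	    RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN)
		sw_verify_l4_cksum(m);	/* hypothetical fallback */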
{
if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
(1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
(1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
- PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
+ RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
uint32_t *cd_tunneling)
{
/* EIPT: External (outer) IP header type */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
+ else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
/* EIPLEN: External (outer) IP header length, in DWords */
ICE_TXD_CTX_QW0_EIPLEN_S;
/* L4TUNT: L4 Tunneling Type */
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_IPIP:
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 00b */
break;
- case PKT_TX_TUNNEL_VXLAN:
- case PKT_TX_TUNNEL_GTP:
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_GTP:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
break;
default:
union ice_tx_offload tx_offload)
{
/* Set MACLEN */
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< ICE_TX_DESC_LEN_MACLEN_S;
else
<< ICE_TX_DESC_LEN_MACLEN_S;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
}
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
static inline uint16_t
ice_calc_context_desc(uint64_t flags)
{
- static uint64_t mask = PKT_TX_TCP_SEG |
- PKT_TX_QINQ |
- PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_MASK;
+ static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_QINQ |
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_TUNNEL_MASK;
return (flags & mask) ? 1 : 0;
}
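A sketch of the caller's side of this helper, following the pattern visible further down in the hunk (the free-descriptor count is an illustrative name):

	/* one data descriptor per segment, plus one optional context
	 * descriptor when TSO/QinQ/outer-checksum/tunnel flags are set
	 */
	uint16_t nb_ctx  = ice_calc_context_desc(tx_pkt->ol_flags);
	uint16_t nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);

	if (nb_used > nb_free_descs)	/* illustrative free count */
		return 0;		/* ring full: stop the burst */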
}
hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
- hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
cd_cmd = ICE_TX_CTX_DESC_TSO;
* the mbuf data size exceeds max data size that hw allows
* per tx desc.
*/
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
nb_ctx);
else
}
/* Descriptor based VLAN insertion */
- if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
/* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & PKT_TX_TUNNEL_MASK)
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
ice_parse_tunneling_params(ol_flags, tx_offload,
&cd_tunneling_params);
txe->mbuf = NULL;
}
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
ice_set_tso_ctx(tx_pkt, tx_offload);
rte_cpu_to_le_32(cd_tunneling_params);
/* TX context descriptor based double VLAN insert */
- if (ol_flags & PKT_TX_QINQ) {
+ if (ol_flags & RTE_MBUF_F_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
- while ((ol_flags & PKT_TX_TCP_SEG) &&
+ while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz =
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & PKT_TX_TCP_SEG &&
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
(m->tso_segsz < ICE_MIN_TSO_MSS ||
m->tso_segsz > ICE_MAX_TSO_MSS ||
m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
- RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
- const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
* bits. This gives us the l3_l4 flags.
*/
const __m256i l3_l4_flags_shuf =
- _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
- PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
+ _mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/**
* second 128-bits
* shift right 20 bits to use the low two bits to indicate
* outer checksum status
* shift right 1 bit to make sure it does not exceed 255
*/
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_OUTER_L4_CKSUM_MASK);
+ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
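The extra >> 20 terms above are the same packing idea applied twice: the outer-L4 status bits sit high in ol_flags (bits 21/22, assuming the usual layout), so they are first moved down next to the inner-checksum bits and the whole byte is then shifted by one; both shifts are undone after the shuffle. Spelled out in scalar form:

	/* sketch: (1 << 22) >> 20 == 4, so the OUTER_L4 GOOD/BAD bits
	 * land in the low bits of the packed byte without colliding
	 * with the inner checksum flags
	 */
	uint8_t packed = (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 |
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			  RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1;	/* 388 -> 194 */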
/**
* data to be shuffled by result of flag mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
- RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
- const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
* bits. This gives us the l3_l4 flags.
*/
const __m256i l3_l4_flags_shuf =
- _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
- PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
+ _mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/**
* second 128-bits
* shift right 20 bits to use the low two bits to indicate
* outer checksum status
* shift right 1 bit to make sure it does not exceed 255
*/
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_OUTER_L4_CKSUM_MASK);
+ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
/**
* data to be shuffled by result of flag mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
/* 2nd 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
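Scalar equivalent of this RSS/VLAN shuffle, assuming (per the comment above) RSS validity in bit 12 and VLAN presence in bit 13 of the descriptor word; desc_word is an illustrative name:

	uint64_t flags = 0;

	if (desc_word & (1u << 12))	/* RSS hash valid */
		flags |= RTE_MBUF_F_RX_RSS_HASH;
	if (desc_word & (1u << 13))	/* VLAN tag present and stripped */
		flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;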
uint16_t i, received;
ICE_TX_DESC_LEN_MACLEN_S;
/* Enable L3 checksum offload */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
td_offset |= (tx_pkt->l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
- } else if (ol_flags & PKT_TX_IPV4) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
td_offset |= (tx_pkt->l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
- } else if (ol_flags & PKT_TX_IPV6) {
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
td_offset |= (tx_pkt->l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
}
/* Enable L4 checksum offloads */
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
/* Tx VLAN/QINQ insertion Offload */
- if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
ICE_TXD_QW1_L2TAG1_S);
ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
- RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
- RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
- const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m128i pkt_fdir_bit = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
__m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
*/
const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0,
0x30f0, 0x30f0);
- const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_OUTER_IP_CKSUM_BAD);
+ const __m128i cksum_mask = _mm_set_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
const __m128i cksum_flags =
- _mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
- PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
+ _mm_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/**
* shift right 20 bits to use the low two bits to indicate
* outer checksum status
* shift right 1 bit to make sure it does not exceed 255
*/
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
/* merge 4 descriptors */
flags = _mm_unpackhi_epi32(descs[0], descs[1]);
#define IGC_TSO_MAX_MSS 9216
/* Bit Mask to indicate what bits required for building TX context */
-#define IGC_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
- PKT_TX_UDP_SEG)
-
-#define IGC_TX_OFFLOAD_SEG (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)
+#define IGC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_UDP_SEG)
+
+#define IGC_TX_OFFLOAD_SEG (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)
#define IGC_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
#define IGC_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
/* L4 Packet TYPE of Reserved */
#define IGC_ADVTXD_TUCMD_L4T_RSV 0x00001800
-#define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
+#define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
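A hedged sketch of the prep-time validation such a NOTSUP mask exists for (the helper name is illustrative): any generic TX offload bit the driver does not implement is rejected before the packet reaches the descriptor path.

	#include <errno.h>

	static inline int
	igc_check_tx_offload(uint64_t ol_flags)
	{
		/* a set bit outside the supported mask means the request
		 * cannot be honoured by this hardware
		 */
		if (ol_flags & IGC_TX_OFFLOAD_NOTSUP_MASK)
			return -ENOTSUP;
		return 0;
	}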
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
static inline uint64_t
rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
{
- static uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_L4_CKSUM_BAD};
- static uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_BAD};
+ static uint64_t l4_chksum_flags[] = {0, 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_L4_CKSUM_BAD};
+ static uint64_t l3_chksum_flags[] = {0, 0,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD};
uint64_t pkt_flags = 0;
uint32_t tmp;
if (statuserr & IGC_RXD_STAT_VP)
- pkt_flags |= PKT_RX_VLAN_STRIPPED;
+ pkt_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
tmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS));
tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E);
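The two status bits computed here form a 2-bit index into the tables above; the step cut off by the hunk boundary looks roughly like this (a sketch, assuming the usual table lookup):

	/* index 0/1: HW did not check L4 at all -> no flag;
	 * index 2:   checked and OK  -> RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	 * index 3:   checked and bad -> RTE_MBUF_F_RX_L4_CKSUM_BAD
	 */
	pkt_flags |= l4_chksum_flags[tmp];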
rxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan);
pkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ?
- PKT_RX_RSS_HASH : 0;
+ RTE_MBUF_F_RX_RSS_HASH : 0;
if (hlen_type_rss & IGC_RXD_VPKT)
- pkt_flags |= PKT_RX_VLAN;
+ pkt_flags |= RTE_MBUF_F_RX_VLAN;
pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);
if (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len +
ol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) {
ol_req &= ~IGC_TX_OFFLOAD_SEG;
- ol_req |= PKT_TX_TCP_CKSUM;
+ ol_req |= RTE_MBUF_F_TX_TCP_CKSUM;
}
return ol_req;
}
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
tx_offload_mask.vlan_tci = 0xffff;
/* check if TCP segmentation required for this packet */
if (ol_flags & IGC_TX_OFFLOAD_SEG) {
/* implies IP cksum in IPv4 */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
else
type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
else
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
mss_l4len_idx |= (uint32_t)tx_offload.l4_len <<
IGC_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= (uint32_t)sizeof(struct rte_tcp_hdr)
<< IGC_ADVTXD_L4LEN_SHIFT;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= (uint32_t)sizeof(struct rte_udp_hdr)
<< IGC_ADVTXD_L4LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= (uint32_t)sizeof(struct rte_sctp_hdr)
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype = vlan_cmd[(ol_flags & RTE_MBUF_F_TX_VLAN_PKT) != 0];
cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
return cmdtype;
}
static const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM};
uint32_t tmp;
- tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
- tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
tmp |= l4_olinfo[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
return tmp;
}
* Timer 0 should be used for packet timestamping;
* sample the packet timestamp to reg 0
*/
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= IGC_ADVTXD_MAC_TSTAMP;
if (tx_ol_req) {
struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
(l3_hdr + txm->l3_len);
- if (txm->ol_flags & PKT_TX_IP_CKSUM) {
+ if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
tcp_hdr->cksum = 0;
struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
(l3_hdr + txm->l3_len);
- if (txm->ol_flags & PKT_TX_IPV4) {
+ if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
tcp_hdr->cksum = 0;
uint32_t offset = 0;
bool start, done;
bool encap;
- bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
+ bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN_PKT);
uint16_t vlan_tci = txm->vlan_tci;
uint64_t ol_flags = txm->ol_flags;
- encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
- ((ol_flags & PKT_TX_OUTER_IPV4) ||
- (ol_flags & PKT_TX_OUTER_IPV6));
+ encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
+ ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
+ (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
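Spelled out, the encap test above is just "an outer checksum was requested and an outer L3 type was declared"; an equivalent sketch:

	bool outer_csum = (ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
				       RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) != 0;
	bool outer_l3   = (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
				       RTE_MBUF_F_TX_OUTER_IPV6)) != 0;
	bool encap      = outer_csum && outer_l3;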
/* Preload inner-most TCP csum field with IP pseudo hdr
* calculated with IP length set to zero. HW will later
desc = &desc_base[q->head_idx];
info = IONIC_INFO_PTR(q, q->head_idx);
- if ((ol_flags & PKT_TX_IP_CKSUM) &&
+ if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_L3)) {
opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
}
- if (((ol_flags & PKT_TX_TCP_CKSUM) &&
+ if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
- ((ol_flags & PKT_TX_UDP_CKSUM) &&
+ ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
stats->no_csum++;
- has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
- encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
- (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
- ((ol_flags & PKT_TX_OUTER_IPV4) ||
- (ol_flags & PKT_TX_OUTER_IPV6));
+ has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN_PKT);
+ encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
+ ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
+ (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
rte_prefetch0(&q->info[next_q_head_idx]);
}
- if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
+ if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
else
err = ionic_tx(txq, tx_pkts[nb_tx]);
*
**********************************************************************/
-#define IONIC_TX_OFFLOAD_MASK ( \
- PKT_TX_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_VLAN | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_L4_MASK)
+#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_VLAN | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_L4_MASK)
#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* RSS */
- pkt_flags |= PKT_RX_RSS_HASH;
+ pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
rxm->hash.rss = cq_desc->rss_hash;
/* Vlan Strip */
if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
- pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
rxm->vlan_tci = cq_desc->vlan_tci;
}
/* Checksum */
if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
- pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else if ((cq_desc->csum_flags &
IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
(cq_desc->csum_flags &
IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
rxm->ol_flags = pkt_flags;
rxq = dev->data->rx_queues[queue];
if (on) {
- rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
} else {
- rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
}
}
#include "ixgbe_rxtx.h"
#ifdef RTE_LIBRTE_IEEE1588
-#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define IXGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define IXGBE_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits required for building TX context */
-#define IXGBE_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
- PKT_TX_MACSEC | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_SEC_OFFLOAD | \
+#define IXGBE_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_MACSEC | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_SEC_OFFLOAD | \
IXGBE_TX_IEEE1588_TMST)
#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
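From the application side, requesting the offloads this mask covers now reads as follows; a minimal IPv4/TCP sketch (the TCP pseudo-header checksum must still be seeded by the caller, which is omitted here):

	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;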
#if 1
#define RTE_PMD_USE_PREFETCH
/* Specify which HW CTX to upload. */
mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
tx_offload_mask.vlan_tci |= ~0;
}
/* check if TCP segmentation required for this packet */
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* implies IP cksum in IPv4 */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
}
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
}
}
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
tx_offload_mask.outer_l2_len |= ~0;
tx_offload_mask.outer_l3_len |= ~0;
tx_offload_mask.l2_len |= ~0;
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
#ifdef RTE_LIB_SECURITY
- if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
union ixgbe_crypto_tx_desc_md *md =
(union ixgbe_crypto_tx_desc_md *)mdata;
seqnum_seed |=
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
vlan_macip_lens = tx_offload.l3_len;
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
vlan_macip_lens |= (tx_offload.outer_l2_len <<
IXGBE_ADVTXD_MACLEN_SHIFT);
else
{
uint32_t tmp = 0;
- if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_IXSM;
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
return tmp;
}
{
uint32_t cmdtype = 0;
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
- if (ol_flags & PKT_TX_MACSEC)
+ if (ol_flags & RTE_MBUF_F_TX_MACSEC)
cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
return cmdtype;
}
*/
ol_flags = tx_pkt->ol_flags;
#ifdef RTE_LIB_SECURITY
- use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+ use_ipsec = txq->using_ipsec && (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
#endif
/* If hardware offload required */
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
#ifdef RTE_LIBRTE_IEEE1588
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif
olinfo_status = 0;
if (tx_ol_req) {
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* when TSO is on, the paylen in the descriptor is
* not the packet len but the tcp payload len */
pkt_len -= (tx_offload.l2_len +
ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
{
static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
- 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, 0, 0,
- 0, 0, 0, PKT_RX_FDIR,
+ 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+ 0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, RTE_MBUF_F_RX_FDIR,
};
#ifdef RTE_LIBRTE_IEEE1588
static uint64_t ip_pkt_etqf_map[8] = {
- 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
#ifdef RTE_LIBRTE_IEEE1588
if (rx_status & IXGBE_RXD_STAT_TMST)
- pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+ pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
return pkt_flags;
}
* Bit 30: L4I, L4I integrity error
*/
static uint64_t error_to_pkt_flags_map[4] = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
};
pkt_flags = error_to_pkt_flags_map[(rx_status >>
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
if ((rx_status & IXGBE_RXDADV_ERR_TCPE) &&
(pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
rx_udp_csum_zero_err)
- pkt_flags &= ~PKT_RX_L4_CKSUM_BAD;
+ pkt_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_BAD;
if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
(rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
- pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
}
#ifdef RTE_LIB_SECURITY
if (rx_status & IXGBE_RXD_STAT_SECP) {
- pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
- pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
}
#endif
ixgbe_rxd_pkt_info_to_pkt_type
(pkt_info[j], rxq->pkt_type_mask);
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
mb->hash.rss = rte_le_to_cpu_32(
rxdp[j].wb.lower.hi_dword.rss);
- else if (pkt_flags & PKT_RX_FDIR) {
+ else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
mb->hash.fdir.hash = rte_le_to_cpu_16(
rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
IXGBE_ATR_HASH_MASK;
rxm->port = rxq->port_id;
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
rxq->pkt_type_mask);
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
rxm->hash.rss = rte_le_to_cpu_32(
rxd.wb.lower.hi_dword.rss);
- else if (pkt_flags & PKT_RX_FDIR) {
+ else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
rxm->hash.fdir.hash = rte_le_to_cpu_16(
rxd.wb.lower.hi_dword.csum_ip.csum) &
IXGBE_ATR_HASH_MASK;
head->port = rxq->port_id;
- /* The vlan_tci field is only valid when PKT_RX_VLAN is
+ /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
head->packet_type =
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
- else if (pkt_flags & PKT_RX_FDIR) {
+ else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
head->hash.fdir.hash =
rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK;
0x00, 0x00, 0x00, 0x00};
const uint8x16_t rss_flags = {
- 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, 0, 0,
- 0, 0, 0, PKT_RX_FDIR};
+ 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ 0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, RTE_MBUF_F_RX_FDIR};
/* mask everything except vlan present and l4/ip csum error */
const uint8x16_t vlan_csum_msk = {
/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
const uint8x16_t vlan_csum_map_lo = {
- PKT_RX_IP_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
0, 0, 0, 0,
- vlan_flags | PKT_RX_IP_CKSUM_GOOD,
- vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- vlan_flags | PKT_RX_IP_CKSUM_BAD,
- vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
0, 0, 0, 0};
const uint8x16_t vlan_csum_map_hi = {
- PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
- PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
0, 0, 0, 0,
- PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
- PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
0, 0, 0, 0};
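
[Editor's note] For readers decoding these tables: the descriptor status nibble (vlan-present 0x8, IPE 0x2, L4E 0x1) is used as a byte-shuffle index into vlan_csum_map_lo/vlan_csum_map_hi, and every flag stored in the low table fits in the low byte of ol_flags, which is what makes the byte-wide lookup legal. A scalar sketch of the low-table mapping follows; the helper name is hypothetical and it models only the valid index combinations:

#include <stdint.h>
#include <rte_mbuf_core.h>

static inline uint8_t
err_bits_to_ol_flags_lo(uint8_t err, uint8_t vlan_flags)
{
	uint8_t fl;

	/* IPE (0x2): IP checksum error reported by the descriptor. */
	fl = (err & 0x2) ? (uint8_t)RTE_MBUF_F_RX_IP_CKSUM_BAD :
			   (uint8_t)RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	/* L4E (0x1): L4 checksum error. */
	if (err & 0x1)
		fl |= (uint8_t)RTE_MBUF_F_RX_L4_CKSUM_BAD;
	/* VP (0x8): VLAN present; vlan_flags was already masked to 8 bits. */
	if (err & 0x8)
		fl |= vlan_flags;
	return fl;
}
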
/* change mask from 0x200(IXGBE_RXDADV_PKTTYPE_UDP) to 0x2 */
0, 0, 0, 0};
const uint8x16_t udp_csum_bad_shuf = {
- 0xFF, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0, 0,
+ 0xFF, ~(uint8_t)RTE_MBUF_F_RX_L4_CKSUM_BAD, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
vtag_lo = vorrq_u8(ptype, vtag_lo);
/* convert the UDP header present 0x2 to 0x1 for aligning with each
- * PKT_RX_L4_CKSUM_BAD value in low byte of 8 bits word ol_flag in
+ * RTE_MBUF_F_RX_L4_CKSUM_BAD value in the low byte of each 8-bit ol_flag in
* vtag_lo (4x8). Then mask out the bad checksum value by shuffle and
* bit-mask.
*/
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* ensure these 2 flags are in the lower 8 bits */
- RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+ RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
vlan_flags = rxq->vlan_flags & UINT8_MAX;
/* A. load 4 packet in one loop
const __m128i ipsec_proc_msk =
_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
const __m128i ipsec_err_flag =
- _mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
- PKT_RX_SEC_OFFLOAD);
- const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);
+ _mm_set1_epi32(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED |
+ RTE_MBUF_F_RX_SEC_OFFLOAD);
+ const __m128i ipsec_proc_flag = _mm_set1_epi32(RTE_MBUF_F_RX_SEC_OFFLOAD);
rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
0x00FF, 0x00FF, 0x00FF, 0x00FF);
/* map rss type to rss hash flag */
- const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
- 0, 0, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
- PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+ const __m128i rss_flags = _mm_set_epi8(RTE_MBUF_F_RX_FDIR, 0, 0, 0,
+ 0, 0, 0,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH, 0,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
/* mask everything except vlan present and l4/ip csum error */
const __m128i vlan_csum_msk = _mm_set_epi16(
/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
const __m128i vlan_csum_map_lo = _mm_set_epi8(
0, 0, 0, 0,
- vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- vlan_flags | PKT_RX_IP_CKSUM_BAD,
- vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- vlan_flags | PKT_RX_IP_CKSUM_GOOD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
0, 0, 0, 0,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD);
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD);
const __m128i vlan_csum_map_hi = _mm_set_epi8(
0, 0, 0, 0,
- 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
- PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
+ 0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
0, 0, 0, 0,
- 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
- PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
+ 0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
/* mask everything except UDP header present if specified */
const __m128i udp_hdr_p_msk = _mm_set_epi16
const __m128i udp_csum_bad_shuf = _mm_set_epi8
(0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0xFF);
+ 0, 0, 0, 0, 0, 0, ~(uint8_t)RTE_MBUF_F_RX_L4_CKSUM_BAD, 0xFF);
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag1 = _mm_or_si128(ptype0, vtag1);
/* convert the UDP header present 0x200 to 0x1 for aligning with each
- * PKT_RX_L4_CKSUM_BAD value in low byte of 16 bits word ol_flag in
+ * RTE_MBUF_F_RX_L4_CKSUM_BAD value in the low byte of each 16-bit ol_flag word in
* vtag1 (4x16). Then mask out the bad checksum value by shuffle and
* bit-mask.
*/
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* ensure these 2 flags are in the lower 8 bits */
- RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+ RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
vlan_flags = rxq->vlan_flags & UINT8_MAX;
/* A. load 4 packet in one loop
if (rh->r_dh.has_hash) {
uint64_t *hash_ptr;
- nicbuf->ol_flags |= PKT_RX_RSS_HASH;
+ nicbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
hash_ptr = rte_pktmbuf_mtod(nicbuf,
uint64_t *);
lio_swap_8B_data(hash_ptr, 1);
uint64_t *hash_ptr;
nicbuf->ol_flags |=
- PKT_RX_RSS_HASH;
+ RTE_MBUF_F_RX_RSS_HASH;
hash_ptr = rte_pktmbuf_mtod(
nicbuf, uint64_t *);
lio_swap_8B_data(hash_ptr, 1);
struct rte_mbuf *m = rx_pkts[data_pkts - 1];
if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
- m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (droq->refill_count >= droq->refill_threshold) {
cmdsetup.s.iq_no = iq_no;
/* check checksum offload flags to form cmd */
- if (m->ol_flags & PKT_TX_IP_CKSUM)
+ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
cmdsetup.s.ip_csum = 1;
- if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
cmdsetup.s.tnl_csum = 1;
- else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
- (m->ol_flags & PKT_TX_UDP_CKSUM))
+ else if ((m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ||
+ (m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM))
cmdsetup.s.transport_csum = 1;
if (m->nb_segs == 1) {
{
struct mlx4_sq *sq = &txq->msq;
const uint8_t tunneled = txq->priv->hw_csum_l2tun &&
- (buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ (buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len;
if (tunneled)
uint16_t flags16[2];
} srcrb;
uint32_t lkey;
- bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG);
+ bool tso = txq->priv->tso && (buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
/* Clean up old buffer. */
if (likely(elt->buf != NULL)) {
/* Enable HW checksum offload if requested */
if (txq->csum &&
(buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
+ (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM))) {
const uint64_t is_tunneled = (buf->ol_flags &
- (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN));
+ (RTE_MBUF_F_TX_TUNNEL_GRE |
+ RTE_MBUF_F_TX_TUNNEL_VXLAN));
if (is_tunneled && txq->csum_l2tun) {
owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
MLX4_WQE_CTRL_IL4_HDR_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ if (buf->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
srcrb.flags |=
RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
} else {
ol_flags |=
mlx4_transpose(flags,
MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
- PKT_RX_IP_CKSUM_GOOD) |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
mlx4_transpose(flags,
MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_GOOD);
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD);
if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
ol_flags |=
mlx4_transpose(flags,
MLX4_CQE_L2_TUNNEL_IPOK,
- PKT_RX_IP_CKSUM_GOOD) |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
mlx4_transpose(flags,
MLX4_CQE_L2_TUNNEL_L4_CSUM,
- PKT_RX_L4_CKSUM_GOOD);
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return ol_flags;
}
/* Update packet information. */
pkt->packet_type =
rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
- pkt->ol_flags = PKT_RX_RSS_HASH;
+ pkt->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
pkt->hash.rss = cqe->immed_rss_invalid;
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
{
uint64_t ol_flags = m->ol_flags;
const struct mlx5_flow_tbl_data_entry *tble;
- const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
if (!is_tunnel_offload_active(dev)) {
info->flags = 0;
ol_flags =
TRANSPOSE(flags,
MLX5_CQE_RX_L3_HDR_VALID,
- PKT_RX_IP_CKSUM_GOOD) |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
TRANSPOSE(flags,
MLX5_CQE_RX_L4_HDR_VALID,
- PKT_RX_L4_CKSUM_GOOD);
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return ol_flags;
}
rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
if (rss_hash_res) {
pkt->hash.rss = rss_hash_res;
- pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
if (rxq->mark) {
mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
(mcqe->flow_tag_high << 16);
if (MLX5_FLOW_MARK_IS_VALID(mark)) {
- pkt->ol_flags |= PKT_RX_FDIR;
+ pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
- pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
vlan_strip = mcqe->hdr_type &
RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
if (vlan_strip) {
- pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
}
}
}
pkt = seg;
MLX5_ASSERT(len >= (rxq->crc_present << 2));
- pkt->ol_flags &= EXT_ATTACHED_MBUF;
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
mlx5_lro_update_hdr
(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
mcqe, rxq, len);
- pkt->ol_flags |= PKT_RX_LRO;
+ pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
pkt->tso_segsz = len / cqe->lro_num_seg;
}
}
if (cqe->lro_num_seg > 1) {
mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
cqe, mcqe, rxq, len);
- pkt->ol_flags |= PKT_RX_LRO;
+ pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
pkt->tso_segsz = len / cqe->lro_num_seg;
}
PKT_LEN(pkt) = len;
shinfo = &buf->shinfos[strd_idx];
rte_mbuf_ext_refcnt_set(shinfo, 1);
/*
- * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
+ * RTE_MBUF_F_EXTERNAL will be set in pkt->ol_flags when
* attaching the stride to mbuf and more offload flags
* will be added below by calling rxq_cq_to_mbuf().
* Other fields will be overwritten.
buf_len, shinfo);
/* Set mbuf head-room. */
SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
- MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
DATA_LEN(pkt) = len;
mbuf_init->nb_segs = 1;
mbuf_init->port = rxq->port_id;
if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
- mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
+ mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
/*
* prevent compiler reordering:
* rearm_data covers previous fields.
/*
* The index should have:
- * bit[0] = PKT_TX_TCP_SEG
- * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
- * bit[4] = PKT_TX_IP_CKSUM
- * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[0] = RTE_MBUF_F_TX_TCP_SEG
+ * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
+ * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
+ * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
* bit[9] = tunnel
*/
for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
/*
* The index should have:
- * bit[0:1] = PKT_TX_L4_MASK
- * bit[4] = PKT_TX_IPV6
- * bit[8] = PKT_TX_OUTER_IPV6
- * bit[9] = PKT_TX_OUTER_UDP
+ * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
+ * bit[4] = RTE_MBUF_F_TX_IPV6
+ * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
+ * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
*/
for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
v = 0;
v |= MLX5_ETH_WQE_L4_OUTER_UDP;
if (i & (1 << 4))
v |= MLX5_ETH_WQE_L3_INNER_IPV6;
- if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
+ if ((i & 3) == (RTE_MBUF_F_TX_UDP_CKSUM >> 52))
v |= MLX5_ETH_WQE_L4_INNER_UDP;
mlx5_swp_types_table[i] = v;
}
const vector unsigned char fdir_flags =
(vector unsigned char)
(vector unsigned int){
- PKT_RX_FDIR, PKT_RX_FDIR,
- PKT_RX_FDIR, PKT_RX_FDIR};
+ RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR};
const vector unsigned char fdir_all_flags =
(vector unsigned char)
(vector unsigned int){
- PKT_RX_FDIR | PKT_RX_FDIR_ID,
- PKT_RX_FDIR | PKT_RX_FDIR_ID,
- PKT_RX_FDIR | PKT_RX_FDIR_ID,
- PKT_RX_FDIR | PKT_RX_FDIR_ID};
+ RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
+ RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
+ RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
+ RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID};
vector unsigned char fdir_id_flags =
(vector unsigned char)
(vector unsigned int){
- PKT_RX_FDIR_ID, PKT_RX_FDIR_ID,
- PKT_RX_FDIR_ID, PKT_RX_FDIR_ID};
+ RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID,
+ RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID};
/* Extract flow_tag field. */
vector unsigned char ftag0 = vec_perm(mcqe1,
zero, flow_mark_shuf);
ol_flags_mask,
(vector unsigned long)fdir_all_flags);
- /* Set PKT_RX_FDIR if flow tag is non-zero. */
+ /* Set RTE_MBUF_F_RX_FDIR if flow tag is non-zero. */
invalid_mask = (vector unsigned char)
vec_cmpeq((vector unsigned int)ftag,
(vector unsigned int)zero);
const vector unsigned char vlan_mask =
(vector unsigned char)
(vector unsigned int) {
- (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
- (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
- (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
- (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED)};
+ (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED)};
const vector unsigned char cv_mask =
(vector unsigned char)
(vector unsigned int) {
}
const vector unsigned char hash_mask =
(vector unsigned char)(vector unsigned int) {
- PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH};
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH};
const vector unsigned char rearm_flags =
(vector unsigned char)(vector unsigned int) {
(uint32_t)t_pkt->ol_flags,
vector unsigned char pinfo, ptype;
vector unsigned char ol_flags = (vector unsigned char)
(vector unsigned int){
- rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
rxq->hw_timestamp * rxq->timestamp_rx_flag,
- rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
rxq->hw_timestamp * rxq->timestamp_rx_flag,
- rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
rxq->hw_timestamp * rxq->timestamp_rx_flag,
- rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
rxq->hw_timestamp * rxq->timestamp_rx_flag};
vector unsigned char cv_flags;
const vector unsigned char zero = (vector unsigned char){0};
(vector unsigned char)(vector unsigned int){
0x00000003, 0x00000003, 0x00000003, 0x00000003};
const vector unsigned char cv_flag_sel = (vector unsigned char){
- 0, (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
- (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), 0,
- (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), 0,
- (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ 0, (uint8_t)(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ (uint8_t)(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1), 0,
+ (uint8_t)(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1), 0,
+ (uint8_t)((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
0, 0, 0, 0, 0, 0, 0, 0, 0};
const vector unsigned char cv_mask =
(vector unsigned char)(vector unsigned int){
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED};
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED};
const vector unsigned char mbuf_init =
(vector unsigned char)vec_vsx_ld
(0, (vector unsigned char *)&rxq->mbuf_initializer);
0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
const vector unsigned char fdir_flags =
(vector unsigned char)(vector unsigned int){
- PKT_RX_FDIR, PKT_RX_FDIR,
- PKT_RX_FDIR, PKT_RX_FDIR};
+ RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR,
+ RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR};
vector unsigned char fdir_id_flags =
(vector unsigned char)(vector unsigned int){
- PKT_RX_FDIR_ID, PKT_RX_FDIR_ID,
- PKT_RX_FDIR_ID, PKT_RX_FDIR_ID};
+ RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID,
+ RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID};
vector unsigned char flow_tag, invalid_mask;
flow_tag = (vector unsigned char)
vec_and((vector unsigned long)pinfo,
(vector unsigned long)pinfo_ft_mask);
- /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ /* Check if flow tag is non-zero then set RTE_MBUF_F_RX_FDIR. */
invalid_mask = (vector unsigned char)
vec_cmpeq((vector unsigned int)flow_tag,
(vector unsigned int)zero);
const uint32x4_t ft_mask =
vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
const uint32x4_t fdir_flags =
- vdupq_n_u32(PKT_RX_FDIR);
+ vdupq_n_u32(RTE_MBUF_F_RX_FDIR);
const uint32x4_t fdir_all_flags =
- vdupq_n_u32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ vdupq_n_u32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
uint32x4_t fdir_id_flags =
- vdupq_n_u32(PKT_RX_FDIR_ID);
+ vdupq_n_u32(RTE_MBUF_F_RX_FDIR_ID);
uint32x4_t invalid_mask, ftag;
__asm__ volatile
invalid_mask = vceqzq_u32(ftag);
ol_flags_mask = vorrq_u32(ol_flags_mask,
fdir_all_flags);
- /* Set PKT_RX_FDIR if flow tag is non-zero. */
+ /* Set RTE_MBUF_F_RX_FDIR if flow tag is non-zero. */
ol_flags = vorrq_u32(ol_flags,
vbicq_u32(fdir_flags, invalid_mask));
/* Mask out invalid entries. */
const uint8_t pkt_hdr3 =
mcq[pos % 8 + 3].hdr_type;
const uint32x4_t vlan_mask =
- vdupq_n_u32(PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED);
+ vdupq_n_u32(RTE_MBUF_F_RX_VLAN |
+ RTE_MBUF_F_RX_VLAN_STRIPPED);
const uint32x4_t cv_mask =
vdupq_n_u32(MLX5_CQE_VLAN_STRIPPED);
const uint32x4_t pkt_cv = {
}
}
const uint32x4_t hash_flags =
- vdupq_n_u32(PKT_RX_RSS_HASH);
+ vdupq_n_u32(RTE_MBUF_F_RX_RSS_HASH);
const uint32x4_t rearm_flags =
vdupq_n_u32((uint32_t)t_pkt->ol_flags);
uint16x4_t ptype;
uint32x4_t pinfo, cv_flags;
uint32x4_t ol_flags =
- vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ vdupq_n_u32(rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
rxq->hw_timestamp * rxq->timestamp_rx_flag);
const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
const uint8x16_t cv_flag_sel = {
0,
- (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
- (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+ (uint8_t)(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+ (uint8_t)(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
0,
- (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ (uint8_t)(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
0,
- (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ (uint8_t)((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
0, 0, 0, 0, 0, 0, 0, 0, 0
};
const uint32x4_t cv_mask =
- vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ vdupq_n_u32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
const uint64x2_t mbuf_init = vld1q_u64
((const uint64_t *)&rxq->mbuf_initializer);
uint64x2_t rearm0, rearm1, rearm2, rearm3;
if (rxq->mark) {
const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
- const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
- uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+ const uint32x4_t fdir_flags = vdupq_n_u32(RTE_MBUF_F_RX_FDIR);
+ uint32x4_t fdir_id_flags = vdupq_n_u32(RTE_MBUF_F_RX_FDIR_ID);
uint32x4_t invalid_mask;
- /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ /* Check if flow tag is non-zero then set RTE_MBUF_F_RX_FDIR. */
invalid_mask = vceqzq_u32(flow_tag);
ol_flags = vorrq_u32(ol_flags,
vbicq_u32(fdir_flags, invalid_mask));
const __m128i ft_mask =
_mm_set1_epi32(0xffffff00);
const __m128i fdir_flags =
- _mm_set1_epi32(PKT_RX_FDIR);
+ _mm_set1_epi32(RTE_MBUF_F_RX_FDIR);
const __m128i fdir_all_flags =
- _mm_set1_epi32(PKT_RX_FDIR |
- PKT_RX_FDIR_ID);
+ _mm_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
__m128i fdir_id_flags =
- _mm_set1_epi32(PKT_RX_FDIR_ID);
+ _mm_set1_epi32(RTE_MBUF_F_RX_FDIR_ID);
/* Extract flow_tag field. */
__m128i ftag0 =
ol_flags_mask = _mm_or_si128(ol_flags_mask,
fdir_all_flags);
- /* Set PKT_RX_FDIR if flow tag is non-zero. */
+ /* Set RTE_MBUF_F_RX_FDIR if flow tag is non-zero. */
ol_flags = _mm_or_si128(ol_flags,
_mm_andnot_si128(invalid_mask,
fdir_flags));
const uint8_t pkt_hdr3 =
_mm_extract_epi8(mcqe2, 8);
const __m128i vlan_mask =
- _mm_set1_epi32(PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED);
+ _mm_set1_epi32(RTE_MBUF_F_RX_VLAN |
+ RTE_MBUF_F_RX_VLAN_STRIPPED);
const __m128i cv_mask =
_mm_set1_epi32(MLX5_CQE_VLAN_STRIPPED);
const __m128i pkt_cv =
}
}
const __m128i hash_flags =
- _mm_set1_epi32(PKT_RX_RSS_HASH);
+ _mm_set1_epi32(RTE_MBUF_F_RX_RSS_HASH);
const __m128i rearm_flags =
_mm_set1_epi32((uint32_t)t_pkt->ol_flags);
{
__m128i pinfo0, pinfo1;
__m128i pinfo, ptype;
- __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
rxq->hw_timestamp * rxq->timestamp_rx_flag);
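
[Editor's note] The multiplications in this initializer are a branchless select: rxq->rss_hash and rxq->hw_timestamp are 0/1 predicates, so multiplying by a flag constant is equivalent to cond ? flag : 0 with no compare. Scalar equivalent (rss_on is a hypothetical 0/1 variable):

	uint64_t ol = (uint64_t)rss_on * RTE_MBUF_F_RX_RSS_HASH; /* rss_on in {0, 1} */
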
__m128i cv_flags;
const __m128i zero = _mm_setzero_si128();
const __m128i pinfo_mask = _mm_set1_epi32(0x3);
const __m128i cv_flag_sel =
_mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
- (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_GOOD) >> 1),
+ (uint8_t)((RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
0,
- (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ (uint8_t)(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
0,
- (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
- (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
+ (uint8_t)(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
+ (uint8_t)(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
0);
const __m128i cv_mask =
- _mm_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ _mm_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
const __m128i mbuf_init =
_mm_load_si128((__m128i *)&rxq->mbuf_initializer);
__m128i rearm0, rearm1, rearm2, rearm3;
ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
if (rxq->mark) {
const __m128i pinfo_ft_mask = _mm_set1_epi32(0xffffff00);
- const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
- __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
+ const __m128i fdir_flags = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR);
+ __m128i fdir_id_flags = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR_ID);
__m128i flow_tag, invalid_mask;
flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
- /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ /* Check if flow tag is non-zero then set RTE_MBUF_F_RX_FDIR. */
invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
ol_flags = _mm_or_si128(ol_flags,
_mm_andnot_si128(invalid_mask,
/* Mbuf dynamic flag offset for inline. */
extern uint64_t rte_net_mlx5_dynf_inline_mask;
-#define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
+#define RTE_MBUF_F_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
if (!MLX5_TXOFF_CONFIG(SWP))
return 0;
ol = loc->mbuf->ol_flags;
- tunnel = ol & PKT_TX_TUNNEL_MASK;
+ tunnel = ol & RTE_MBUF_F_TX_TUNNEL_MASK;
/*
* Check whether Software Parser is required.
* Only customized tunnels may ask for it.
*/
- if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
+ if (likely(tunnel != RTE_MBUF_F_TX_TUNNEL_UDP && tunnel != RTE_MBUF_F_TX_TUNNEL_IP))
return 0;
/*
* The index should have:
- * bit[0:1] = PKT_TX_L4_MASK
- * bit[4] = PKT_TX_IPV6
- * bit[8] = PKT_TX_OUTER_IPV6
- * bit[9] = PKT_TX_OUTER_UDP
+ * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
+ * bit[4] = RTE_MBUF_F_TX_IPV6
+ * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
+ * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
*/
- idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
- idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
+ idx = (ol & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_IPV6 | RTE_MBUF_F_TX_OUTER_IPV6)) >> 52;
+ idx |= (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP) ? (1 << 9) : 0;
*swp_flags = mlx5_swp_types_table[idx];
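
[Editor's note] A worked example of the index arithmetic, assuming only the flag positions this patch pins with RTE_BUILD_BUG_ON elsewhere (UDP checksum request is 3ULL << 52, outer IPv6 is 1ULL << 60); the self-check function is illustrative, not part of the patch:

#include <assert.h>
#include <rte_mbuf_core.h>

static void
swp_idx_example(void)
{
	const uint64_t ol = RTE_MBUF_F_TX_UDP_CKSUM |
			    RTE_MBUF_F_TX_OUTER_IPV6 |
			    RTE_MBUF_F_TX_TUNNEL_UDP;
	uint32_t idx = (ol & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_IPV6 |
			      RTE_MBUF_F_TX_OUTER_IPV6)) >> 52;

	if ((ol & RTE_MBUF_F_TX_TUNNEL_MASK) == RTE_MBUF_F_TX_TUNNEL_UDP)
		idx |= 1 << 9;
	/* bits[0:1] = UDP, bit[8] = outer IPv6, bit[9] = UDP tunnel */
	assert(idx == 0x303);
}
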
/*
* Set offsets for SW parser. Since ConnectX-5, SW parser offsets
* should be set regardless of HW offload.
*/
off = loc->mbuf->outer_l2_len;
- if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && ol & RTE_MBUF_F_TX_VLAN_PKT)
off += sizeof(struct rte_vlan_hdr);
set = (off >> 1) << 8; /* Outer L3 offset. */
off += loc->mbuf->outer_l3_len;
- if (tunnel == PKT_TX_TUNNEL_UDP)
+ if (tunnel == RTE_MBUF_F_TX_TUNNEL_UDP)
set |= off >> 1; /* Outer L4 offset. */
- if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
- const uint64_t csum = ol & PKT_TX_L4_MASK;
+ if (ol & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) { /* Inner IP. */
+ const uint64_t csum = ol & RTE_MBUF_F_TX_L4_MASK;
off += loc->mbuf->l2_len;
set |= (off >> 1) << 24; /* Inner L3 offset. */
- if (csum == PKT_TX_TCP_CKSUM ||
- csum == PKT_TX_UDP_CKSUM ||
- (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
+ if (csum == RTE_MBUF_F_TX_TCP_CKSUM ||
+ csum == RTE_MBUF_F_TX_UDP_CKSUM ||
+ (MLX5_TXOFF_CONFIG(TSO) && ol & RTE_MBUF_F_TX_TCP_SEG)) {
off += loc->mbuf->l3_len;
set |= (off >> 1) << 16; /* Inner L4 offset. */
}
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
uint32_t idx;
- uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
- const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
- PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+ uint8_t is_tunnel = !!(buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_L4_MASK |
+ RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM;
/*
* The index should have:
- * bit[0] = PKT_TX_TCP_SEG
- * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
- * bit[4] = PKT_TX_IP_CKSUM
- * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[0] = RTE_MBUF_F_TX_TCP_SEG
+ * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
+ * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
+ * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
* bit[9] = tunnel
*/
idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
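
[Editor's note] To make the shift concrete, a small illustrative self-check (not part of the patch) using the positions asserted elsewhere in this series (TCP_SEG at bit 50, IP_CKSUM at bit 54, OUTER_IP_CKSUM at bit 58):

#include <assert.h>
#include <rte_mbuf_core.h>

static void
cksum_idx_example(void)
{
	const uint64_t mask = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_L4_MASK |
			      RTE_MBUF_F_TX_IP_CKSUM |
			      RTE_MBUF_F_TX_OUTER_IP_CKSUM;
	const uint64_t ol = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IP_CKSUM |
			    RTE_MBUF_F_TX_OUTER_IP_CKSUM |
			    RTE_MBUF_F_TX_TUNNEL_VXLAN;
	uint32_t idx = ((ol & mask) >> 50) |
		       ((uint32_t)!!(ol & RTE_MBUF_F_TX_TUNNEL_MASK) << 9);

	/* bit[0] = TSO, bit[4] = inner csum, bit[8] = outer csum, bit[9] = tunnel */
	assert(idx == 0x311);
}
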
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
/* Engage VLAN tag insertion feature if requested. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
/*
* We should get here only if device support
* this feature correctly.
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
MLX5_ASSERT(loc->mbuf_nseg > 1);
MLX5_ASSERT(loc->mbuf);
--loc->mbuf_nseg;
- if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
+ if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
unsigned int diff;
if (copy >= must) {
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
pdst = (uint8_t *)&es->inline_data;
* the required space in WQE ring buffer.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
vlan = sizeof(struct rte_vlan_hdr);
inlen = loc->mbuf->l2_len + vlan +
loc->mbuf->l3_len + loc->mbuf->l4_len;
if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
return MLX5_TXCMP_CODE_ERROR;
- if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
+ if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
/* Packet must contain all TSO headers. */
if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
txq->stats.obytes += sizeof(struct rte_vlan_hdr);
#endif
/*
* to estimate the required space for WQE.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
vlan = sizeof(struct rte_vlan_hdr);
inlen = dlen + vlan;
/* Check against minimal length. */
return MLX5_TXCMP_CODE_ERROR;
MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
if (inlen > txq->inlen_send ||
- loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE) {
struct rte_mbuf *mbuf;
unsigned int nxlen;
uintptr_t start;
* support the offload, will do with software inline.
*/
inlen = MLX5_ESEG_MIN_INLINE_SIZE;
- } else if (mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
+ } else if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE ||
nxlen > txq->inlen_send) {
return mlx5_tx_packet_multi_send(txq, loc, olx);
} else {
if (loc->elts_free < NB_SEGS(loc->mbuf))
return MLX5_TXCMP_CODE_EXIT;
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
/* Proceed with multi-segment TSO. */
ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
} else if (MLX5_TXOFF_CONFIG(INLINE)) {
continue;
/* Here ends the series of multi-segment packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
return MLX5_TXCMP_CODE_SINGLE;
}
}
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
vlan = sizeof(struct rte_vlan_hdr);
}
/*
loc->mbuf->l3_len + loc->mbuf->l4_len;
if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
return MLX5_TXCMP_CODE_ERROR;
- if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
+ if (loc->mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
hlen += loc->mbuf->outer_l2_len +
loc->mbuf->outer_l3_len;
/* Segment must contain all TSO headers. */
if (MLX5_TXOFF_CONFIG(MULTI) &&
unlikely(NB_SEGS(loc->mbuf) > 1))
return MLX5_TXCMP_CODE_MULTI;
- if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ if (likely(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
return MLX5_TXCMP_CODE_SINGLE;
/* Continue with the next TSO packet. */
}
/* Check for TSO packet. */
if (newp &&
MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
/* Check if eMPW is enabled at all. */
if (!MLX5_TXOFF_CONFIG(EMPW))
return MLX5_TXCMP_CODE_SINGLE;
/* Check if eMPW can be engaged. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
+ unlikely(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) &&
(!MLX5_TXOFF_CONFIG(INLINE) ||
unlikely((rte_pktmbuf_data_len(loc->mbuf) +
sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
return false;
/* Fill metadata field if needed. */
if (MLX5_TXOFF_CONFIG(METADATA) &&
- es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ es->metadata != (loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_METADATA ?
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
return false;
/* Legacy MPW can send packets with the same length only. */
return false;
/* There must be no VLAN packets in eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
- MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT));
/* Check if the scheduling is requested. */
if (MLX5_TXOFF_CONFIG(TXPP) &&
loc->mbuf->ol_flags & txq->ts_mask)
}
/* Inline or not inline - that's the Question. */
if (dlen > txq->inlen_empw ||
- loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
goto pointer_empw;
if (MLX5_TXOFF_CONFIG(MPW)) {
if (dlen > txq->inlen_send)
}
/* Inline entire packet, optional VLAN insertion. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
/*
* The packet length must be checked in
* mlx5_tx_able_to_empw() and packet
MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
if (MLX5_TXOFF_CONFIG(VLAN))
MLX5_ASSERT(!(loc->mbuf->ol_flags &
- PKT_TX_VLAN_PKT));
+ RTE_MBUF_F_TX_VLAN_PKT));
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
inlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
vlan = sizeof(struct rte_vlan_hdr);
inlen += vlan;
}
if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
return MLX5_TXCMP_CODE_ERROR;
if (loc->mbuf->ol_flags &
- PKT_TX_DYNF_NOINLINE) {
+ RTE_MBUF_F_TX_DYNF_NOINLINE) {
/*
* The hint flag not to inline packet
* data is set. Check whether we can
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
txq->stats.obytes +=
sizeof(struct rte_vlan_hdr);
#endif
}
/* Dedicated branch for single-segment TSO packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
+ unlikely(loc.mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
/*
* TSO might require a special way of inlining
* (dedicated parameters) and is sent with
#define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \
DEV_TX_OFFLOAD_MULTI_SEGS)
-#define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_CKSUM | \
- PKT_TX_UDP_CKSUM)
+#define MVNETA_TX_PKT_OFFLOADS (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_UDP_CKSUM)
struct mvneta_priv {
/* Hot fields, used in fast path. */
* default value
*/
*l3_type = NETA_OUTQ_L3_TYPE_IPV4;
- *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ *gen_l3_cksum = ol_flags & RTE_MBUF_F_TX_IP_CKSUM ? 1 : 0;
- if (ol_flags & PKT_TX_IPV6) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*l3_type = NETA_OUTQ_L3_TYPE_IPV6;
/* no checksum for ipv6 header */
*gen_l3_cksum = 0;
}
- if (ol_flags & PKT_TX_TCP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) {
*l4_type = NETA_OUTQ_L4_TYPE_TCP;
*gen_l4_cksum = 1;
- } else if (ol_flags & PKT_TX_UDP_CKSUM) {
+ } else if (ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) {
*l4_type = NETA_OUTQ_L4_TYPE_UDP;
*gen_l4_cksum = 1;
} else {
status = neta_ppio_inq_desc_get_l3_pkt_error(desc);
if (unlikely(status != NETA_DESC_ERR_OK))
- flags = PKT_RX_IP_CKSUM_BAD;
+ flags = RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags = PKT_RX_IP_CKSUM_GOOD;
+ flags = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
status = neta_ppio_inq_desc_get_l4_pkt_error(desc);
if (unlikely(status != NETA_DESC_ERR_OK))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
return flags;
}
#define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
DEV_TX_OFFLOAD_MULTI_SEGS)
-#define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_CKSUM | \
- PKT_TX_UDP_CKSUM)
+#define MRVL_TX_PKT_OFFLOADS (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_UDP_CKSUM)
static const char * const valid_args[] = {
MRVL_IFACE_NAME_ARG,
if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
if (unlikely(status != PP2_DESC_ERR_OK))
- flags |= PKT_RX_IP_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- flags |= PKT_RX_IP_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (((packet_type & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) ||
((packet_type & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) {
status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
if (unlikely(status != PP2_DESC_ERR_OK))
- flags |= PKT_RX_L4_CKSUM_BAD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
return flags;
* default value
*/
*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
- *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ *gen_l3_cksum = ol_flags & RTE_MBUF_F_TX_IP_CKSUM ? 1 : 0;
- if (ol_flags & PKT_TX_IPV6) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
/* no checksum for ipv6 header */
*gen_l3_cksum = 0;
}
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) {
*l4_type = PP2_OUTQ_L4_TYPE_TCP;
*gen_l4_cksum = 1;
- } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
+ } else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) {
*l4_type = PP2_OUTQ_L4_TYPE_UDP;
*gen_l4_cksum = 1;
} else {
if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
m->vlan_tci = info->vlan_info;
- m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
/* NDIS always strips tag, put it back if necessary */
if (!hv->vlan_strip && rte_vlan_insert(&m)) {
if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
| NDIS_RXCSUM_INFO_TCPCS_OK))
- m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
| NDIS_RXCSUM_INFO_UDPCS_FAILED))
- m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
- m->ol_flags |= PKT_RX_RSS_HASH;
+ m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
m->hash.rss = info->hash_value;
}
NDIS_PKTINFO_TYPE_HASHVAL);
*pi_data = queue_id;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
NDIS_PKTINFO_TYPE_VLAN);
*pi_data = m->vlan_tci;
}
- if (m->ol_flags & PKT_TX_TCP_SEG) {
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
NDIS_PKTINFO_TYPE_LSO);
- if (m->ol_flags & PKT_TX_IPV6) {
+ if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
*pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
m->tso_segsz);
} else {
m->tso_segsz);
}
} else if (m->ol_flags &
- (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
+ (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)) {
pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
NDIS_PKTINFO_TYPE_CSUM);
*pi_data = 0;
- if (m->ol_flags & PKT_TX_IPV6)
+ if (m->ol_flags & RTE_MBUF_F_TX_IPV6)
*pi_data |= NDIS_TXCSUM_INFO_IPV6;
- if (m->ol_flags & PKT_TX_IPV4) {
+ if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
*pi_data |= NDIS_TXCSUM_INFO_IPV4;
- if (m->ol_flags & PKT_TX_IP_CKSUM)
+ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
*pi_data |= NDIS_TXCSUM_INFO_IPCS;
}
- if (m->ol_flags & PKT_TX_TCP_CKSUM)
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM)
*pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
- else if (m->ol_flags & PKT_TX_UDP_CKSUM)
+ else if (m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM)
*pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
}
ol_flags = mb->ol_flags;
- if (!(ol_flags & PKT_TX_TCP_SEG))
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
goto clean_txd;
txd->l3_offset = mb->l2_len;
ol_flags = mb->ol_flags;
/* IPv6 does not need checksum */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
txd->flags |= PCIE_DESC_TX_IP4_CSUM;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
txd->flags |= PCIE_DESC_TX_UDP_CSUM;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
txd->flags |= PCIE_DESC_TX_TCP_CSUM;
break;
}
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
txd->flags |= PCIE_DESC_TX_CSUM;
}
/* If IPv4 and IP checksum error, fail */
if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
!(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
- mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
/* If neither UDP nor TCP return */
if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
return;
if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
- mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else
- mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
#define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
}
mbuf->hash.rss = hash;
- mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
switch (hash_type) {
case NFP_NET_RSS_IPV4:
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
/* Adding the mbuf to the mbuf array passed by the app */
nfp_net_tx_tso(txq, &txd, pkt);
nfp_net_tx_cksum(txq, &txd, pkt);
- if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
txd.flags |= PCIE_DESC_TX_VLAN;
txd.vlan = pkt->vlan_tci;
* 0x2 - TCP L4 checksum
* 0x3 - SCTP L4 checksum
*/
- const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) +
- (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
- (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));
-
- const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) ||
- !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) ||
- !!(ol_flags & PKT_TX_TUNNEL_VXLAN) ||
- !!(ol_flags & PKT_TX_TUNNEL_GRE) ||
- !!(ol_flags & PKT_TX_TUNNEL_GENEVE) ||
- !!(ol_flags & PKT_TX_TUNNEL_IP) ||
- !!(ol_flags & PKT_TX_TUNNEL_IPIP));
-
- const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) ||
- !!(ol_flags & PKT_TX_TUNNEL_UDP));
+ const uint8_t csum = (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3) +
+ (!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
+ (!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));
+
+ const uint8_t is_tunnel_parsed = (!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IP) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP));
+
+ const uint8_t csum_outer = (!!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_UDP));
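
[Editor's note] The XOR construction above deserves a note: the three checksum requests occupy bits [52:53] with the codes TCP = 1, SCTP = 2, UDP = 3 (pinned by the RTE_BUILD_BUG_ON block later in this patch), so XOR-ing ol_flags with one candidate zeroes those two bits only on an exact match; each !(...) is then a 0/1 predicate and the weighted sum yields the hardware codes 0x1 = UDP, 0x2 = TCP, 0x3 = SCTP. Scalar restatement (illustrative):

#include <stdint.h>
#include <rte_mbuf_core.h>

static inline uint8_t
l4_csum_code(uint64_t ol_flags)
{
	return (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3)) +
	       (!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
	       (!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3);
}
/* l4_csum_code(RTE_MBUF_F_TX_TCP_CKSUM) == 2; a packet with no L4
 * request returns 0. */
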
const uint8_t outer_l2_len = m->outer_l2_len;
const uint8_t l2_len = m->l2_len;
send_hdr->w0.l3ptr = outer_l2_len;
send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
/* Set clk3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);
/* Outer L4 */
send_hdr->w0.ckl4 = csum_outer;
/* Set clke for PKO to calculate inner IPV4 header
* checksum.
*/
- send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4);
+ send_hdr->w0.ckle = !!(ol_flags & RTE_MBUF_F_TX_IPV4);
/* Inner L4 */
send_hdr->w0.cklf = csum;
send_hdr->w0.l3ptr = l2_len;
send_hdr->w0.l4ptr = l2_len + m->l3_len;
/* Set clk3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);
/* Inner L4 */
send_hdr->w0.ckl4 = csum;
send_hdr->w0.l3ptr = outer_l2_len;
send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
/* Set clk3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);
/* Outer L4 */
send_hdr->w0.ckl4 = csum_outer;
send_hdr->w0.l3ptr = l2_len;
send_hdr->w0.l4ptr = l2_len + m->l3_len;
/* Set clk3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);
/* Inner L4 */
send_hdr->w0.ckl4 = csum;
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
- RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
- RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
- RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
- RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
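
[Editor's note] These compile-time pins are not cosmetic: the fastpath derives descriptor fields by shifting ol_flags directly instead of testing each flag, for example later in this patch:

	/* inner L4 type is read straight out of bits [52:53] of ol_flags */
	w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;

Any change to the renamed flags' numeric values would silently break such shifts, which is exactly what the asserts guard against.
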
errlev = idx & 0xf;
errcode = (idx & 0xff0) >> 4;
- val = PKT_RX_IP_CKSUM_UNKNOWN;
- val |= PKT_RX_L4_CKSUM_UNKNOWN;
- val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+ val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+ val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;
switch (errlev) {
case NPC_ERRLEV_RE:
* including Outer L2 length mismatch error
*/
if (errcode) {
- val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LC:
if (errcode == NPC_EC_OIP4_CSUM ||
errcode == NPC_EC_IP_FRAG_OFFSET_1) {
- val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LG:
if (errcode == NPC_EC_IIP4_CSUM)
- val |= PKT_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case NPC_ERRLEV_NIX:
if (errcode == NIX_RX_PERRCODE_OL4_CHK ||
errcode == NIX_RX_PERRCODE_OL4_LEN ||
errcode == NIX_RX_PERRCODE_OL4_PORT) {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_BAD;
- val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL4_CHK ||
errcode == NIX_RX_PERRCODE_IL4_LEN ||
errcode == NIX_RX_PERRCODE_IL4_PORT) {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL3_LEN ||
errcode == NIX_RX_PERRCODE_OL3_LEN) {
- val |= PKT_RX_IP_CKSUM_BAD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- val |= PKT_RX_IP_CKSUM_GOOD;
- val |= PKT_RX_L4_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
}
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
if (w2 & BIT_ULL(21) /* vtag0_gone */) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
}
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
if (w2 & BIT_ULL(23) /* vtag1_gone */) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
}
f1 = vsetq_lane_u32(cq1_w0, f1, 3);
f2 = vsetq_lane_u32(cq2_w0, f2, 3);
f3 = vsetq_lane_u32(cq3_w0, f3, 3);
- ol_flags0 = PKT_RX_RSS_HASH;
- ol_flags1 = PKT_RX_RSS_HASH;
- ol_flags2 = PKT_RX_RSS_HASH;
- ol_flags3 = PKT_RX_RSS_HASH;
+ ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
} else {
ol_flags0 = 0; ol_flags1 = 0;
ol_flags2 = 0; ol_flags3 = 0;
*/
*otx2_timestamp_dynfield(mbuf, tstamp) =
rte_be_to_cpu_64(*tstamp_ptr);
- /* PKT_RX_IEEE1588_TMST flag needs to be set only in case
+ /* RTE_MBUF_F_RX_IEEE1588_TMST flag needs to be set only in case
* PTP packets are received.
*/
if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
tstamp->rx_tstamp =
*otx2_timestamp_dynfield(mbuf, tstamp);
tstamp->rx_ready = 1;
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
- PKT_RX_IEEE1588_TMST |
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
+ RTE_MBUF_F_RX_IEEE1588_TMST |
tstamp->rx_tstamp_dynflag;
}
}
* 0 to OTX2_FLOW_ACTION_FLAG_DEFAULT - 2
*/
if (likely(match_id)) {
- ol_flags |= PKT_RX_FDIR;
+ ol_flags |= RTE_MBUF_F_RX_FDIR;
if (match_id != OTX2_FLOW_ACTION_FLAG_DEFAULT) {
- ol_flags |= PKT_RX_FDIR_ID;
+ ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
mbuf->hash.fdir.hi = match_id - 1;
}
}
int i;
if (unlikely(nix_rx_sec_cptres_get(cq) != OTX2_SEC_COMP_GOOD))
- return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED;
+ return RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
/* 20 bits of tag would have the SPI */
spi = cq->tag & 0xFFFFF;
if (sa->replay_win_sz) {
if (cpt_ipsec_ip_antireplay_check(sa, l3_ptr) < 0)
- return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED;
+ return RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
}
l2_ptr_actual = RTE_PTR_ADD(l2_ptr,
m_len = ip_len + l2_len;
m->data_len = m_len;
m->pkt_len = m_len;
- return PKT_RX_SEC_OFFLOAD;
+ return RTE_MBUF_F_RX_SEC_OFFLOAD;
}
static __rte_always_inline void
if (flag & NIX_RX_OFFLOAD_RSS_F) {
mbuf->hash.rss = tag;
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
if (rx->vtag0_gone) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = rx->vtag0_tci;
}
if (rx->vtag1_gone) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = rx->vtag1_tci;
}
}
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
- 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
- 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
- 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
- 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
- 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
};
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 |
- * PKT_TX_TCP_CKSUM
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x22, /* PKT_TX_IPV4 |
- * PKT_TX_SCTP_CKSUM
+ 0x22, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x32, /* PKT_TX_IPV4 |
- * PKT_TX_UDP_CKSUM
+ 0x32, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x03, /* PKT_TX_IPV4 |
- * PKT_TX_IP_CKSUM
+ 0x03, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_IP_CKSUM
*/
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
},
NIX_TX_OFFLOAD_TSO_F)
#define NIX_UDP_TUN_BITMASK \
- ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
- (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
+ ((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) | \
+ (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))
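
[Editor's note] Worked expansion, assuming the flag values in rte_mbuf_core.h (VXLAN is 1ULL << 45, GENEVE is 4ULL << 45): the macro evaluates to (1 << 1) | (1 << 4) == 0x12, so the UDP-tunnel test used further down reduces to one shift and one mask:

	const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >>
		((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) & 0x1;
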
#define NIX_LSO_FORMAT_IDX_TSOV4 (0)
#define NIX_LSO_FORMAT_IDX_TSOV6 (1)
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
struct nix_send_mem_s *send_mem;
uint16_t off = (no_segdw - 1) << 1;
- const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+ const uint8_t is_ol_tstamp = !(ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST);
send_mem = (struct nix_send_mem_s *)(cmd + off);
if (flags & NIX_TX_MULTI_SEG_F) {
rte_compiler_barrier();
}
- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
+ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
* next 8 bytes as it would corrupt the actual tx tstamp registered
uint64_t mask, ol_flags = m->ol_flags;
if (flags & NIX_TX_OFFLOAD_TSO_F &&
- (ol_flags & PKT_TX_TCP_SEG)) {
+ (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
uint16_t *iplen, *oiplen, *oudplen;
uint16_t lso_sb, paylen;
- mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
+ mask = -!!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6));
lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
m->l2_len + m->l3_len + m->l4_len;
/* Get iplen position assuming no tunnel hdr */
iplen = (uint16_t *)(mdata + m->l2_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) & 0x1;
oiplen = (uint16_t *)(mdata + m->outer_l2_len +
- (2 << !!(ol_flags & PKT_TX_OUTER_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)));
*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
paylen);
/* Update iplen position to inner ip hdr */
iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
- m->l4_len + (2 << !!(ol_flags & PKT_TX_IPV6)));
+ m->l4_len + (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
}
*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
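The `(2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6))` term above is the byte offset of the length field inside the IP header: 2 for IPv4's total_length, 4 for IPv6's payload_len. A compile-time sketch of that assumption (requires rte_ip.h and rte_common.h):
/* Hedged sketch: the offsets behind (2 << !!is_ipv6). */
static inline void nix_check_ip_len_offsets(void)
{
	RTE_BUILD_BUG_ON(offsetof(struct rte_ipv4_hdr, total_length) != 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_ipv6_hdr, payload_len) != 4);
	/* so (2 << 0) == 2 for IPv4 and (2 << 1) == 4 for IPv6 */
}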
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t ol3type =
- ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L3 */
w1.ol3type = ol3type;
w1.ol4type = csum + (csum << 1);
/* Inner L3 */
- w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2);
+ w1.il3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2);
w1.il3ptr = w1.ol4ptr + m->l2_len;
w1.il4ptr = w1.il3ptr + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.il3type = w1.il3type + !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
/* In case of no tunnel header use only
* shift IL3/IL4 fields a bit to use
((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t outer_l2_len = m->outer_l2_len;
/* Outer L3 */
w1.ol3ptr = outer_l2_len;
w1.ol4ptr = outer_l2_len + m->outer_l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L4 */
w1.ol4type = csum + (csum << 1);
w1.ol3ptr = l2_len;
w1.ol4ptr = l2_len + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.ol4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
}
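The `>> 52` shift works because DPDK encodes the L4 checksum request as a 2-bit value at bit 52 of ol_flags (RTE_MBUF_F_TX_L4_MASK is 3ULL << 52), which lines up with the 2-bit hardware ol4type/il4type fields. A compile-time sketch of that encoding, assuming the 21.11 flag values:
/* Hedged sketch: L4 csum flags are a 2-bit enum at bit 52. */
static inline void nix_check_l4_flag_encoding(void)
{
	RTE_BUILD_BUG_ON((RTE_MBUF_F_TX_TCP_CKSUM >> 52) != 1);
	RTE_BUILD_BUG_ON((RTE_MBUF_F_TX_SCTP_CKSUM >> 52) != 2);
	RTE_BUILD_BUG_ON((RTE_MBUF_F_TX_UDP_CKSUM >> 52) != 3);
}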
if (flags & NIX_TX_NEED_EXT_HDR &&
flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
- send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
+ send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
- send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
+ send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_QINQ);
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
}
if (flags & NIX_TX_OFFLOAD_TSO_F &&
- (ol_flags & PKT_TX_TCP_SEG)) {
+ (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uint16_t lso_sb;
uint64_t mask;
send_hdr_ext->w0.lso = 1;
send_hdr_ext->w0.lso_mps = m->tso_segsz;
send_hdr_ext->w0.lso_format =
- NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) & 0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
- shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
- shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
"Outer L3 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
flags = fp_cqe->tunnel_pars_flags.flags;
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(CQE_HAS_VLAN(parse_flag) ||
CQE_HAS_OUTER_VLAN(parse_flag))) {
/* Note: FW doesn't indicate Q-in-Q packet */
- ol_flags |= PKT_RX_VLAN;
+ ol_flags |= RTE_MBUF_F_RX_VLAN;
if (qdev->vlan_strip_flg) {
- ol_flags |= PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
if (rss_enable) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rx_mb->hash.rss = rss_hash;
}
tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
tpa_start_flg = true;
/* Mark it as LRO packet */
- ol_flags |= PKT_RX_LRO;
+ ol_flags |= RTE_MBUF_F_RX_LRO;
/* In split mode, seg_len is same as len_on_first_bd
* and bw_ext_bd_len_list will be empty since there are
* no additional buffers
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
"Outer L3 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (tpa_start_flg)
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (CQE_HAS_VLAN(parse_flag) ||
CQE_HAS_OUTER_VLAN(parse_flag)) {
/* Note: FW doesn't indicate Q-in-Q packet */
- ol_flags |= PKT_RX_VLAN;
+ ol_flags |= RTE_MBUF_F_RX_VLAN;
if (qdev->vlan_strip_flg) {
- ol_flags |= PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
/* RSS Hash */
if (qdev->rss_enable) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rx_mb->hash.rss = rss_hash;
}
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
rte_errno = EINVAL;
break;
}
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
/* We support only limited tunnel protocols */
- if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
uint64_t temp;
- temp = ol_flags & PKT_TX_TUNNEL_MASK;
- if (temp == PKT_TX_TUNNEL_VXLAN ||
- temp == PKT_TX_TUNNEL_GENEVE ||
- temp == PKT_TX_TUNNEL_MPLSINUDP ||
- temp == PKT_TX_TUNNEL_GRE)
+ temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
+ if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
+ temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
+ temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
+ temp == RTE_MBUF_F_TX_TUNNEL_GRE)
continue;
}
<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM)
+ if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
/* L4 checksum offload (tcp or udp) */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
* offloads. Don't rely on pkt_type marked by Rx, instead use
* tx_ol_flags to decide.
*/
- tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+ tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
if (tunn_flg) {
/* Check against max which is Tunnel IPv6 + ext */
}
/* Outer IP checksum offload */
- if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_OUTER_IPV4)) {
+ if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_OUTER_IPV4)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
* and inner layers lengths need to be provided in
* mbuf.
*/
- if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_MPLSINUDP) {
+ if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
+ RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
mplsoudp_flg = true;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
/* Mark inner IPv6 if present */
- if (tx_ol_flags & PKT_TX_IPV6)
+ if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
bd2_bf1 |=
1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
/* Inner L4 offsets */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM |
- PKT_TX_TCP_CKSUM))) {
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
+ RTE_MBUF_F_TX_TCP_CKSUM))) {
/* Determines if BD3 is needed */
tunn_ipv6_ext_flg = true;
- if ((tx_ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_UDP_CKSUM) {
+ if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_UDP_CKSUM) {
bd2_bf1 |=
1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
}
} /* End MPLSoUDP */
} /* End Tunnel handling */
- if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
lso_flg = true;
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_LSO_PKT))
bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ /* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
mss = rte_cpu_to_le_16(mbuf->tso_segsz);
}
/* Descriptor based VLAN insertion */
- if (tx_ol_flags & PKT_TX_VLAN_PKT) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
/* There's no DPDK flag to request outer-L4 csum
* offload. But in the case of tunnel, if inner L3 or L4
* csum offload is requested then we need to force
* recalculation of L4 tunnel header csum also.
*/
- if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
- PKT_TX_TUNNEL_GRE)) {
+ if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
+ RTE_MBUF_F_TX_TUNNEL_GRE)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
}
/* L4 checksum offload (tcp or udp) */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
/* There's no DPDK flag to request outer-L4 csum
#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */
-#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_CKSUM | \
- PKT_TX_UDP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_IPV4 | \
- PKT_TX_IPV6)
+#define QEDE_TX_CSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_UDP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IPV6)
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_MASK)
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_TUNNEL_MASK)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
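Since QEDE_TX_OFFLOAD_MASK is a subset of RTE_MBUF_F_TX_OFFLOAD_MASK, the XOR leaves exactly the Tx offload bits the PMD cannot handle. A minimal sketch of how a tx_prepare path consumes such a mask, mirroring the check earlier in this patch:
/* Hedged sketch: reject mbufs requesting unsupported Tx offloads. */
#include <rte_errno.h>
#include <rte_mbuf.h>

static inline int qede_tx_flags_supported(const struct rte_mbuf *m)
{
	if (m->ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
		rte_errno = ENOTSUP;
		return 0;
	}
	return 1;
}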
/* TPA related structures */
struct qede_agg_info {
unsigned int nb_vlan_descs)
{
unsigned int descs_required = m->nb_segs;
- unsigned int tcph_off = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ unsigned int tcph_off = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
m->outer_l2_len + m->outer_l3_len : 0) +
m->l2_len + m->l3_len;
unsigned int header_len = tcph_off + m->l4_len;
* to proceed with additional checks below.
* Otherwise, throw an error.
*/
- if ((m->ol_flags & PKT_TX_TCP_SEG) == 0 ||
+ if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 ||
tso_bounce_buffer_len == 0)
return EINVAL;
}
}
- if (m->ol_flags & PKT_TX_TCP_SEG) {
- switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
case 0:
break;
- case PKT_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
/* FALLTHROUGH */
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
if (!(m->ol_flags &
- (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+ (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
return EINVAL;
}
return EFX_WORD_FIELD(class,
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM) ==
ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
- PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
static inline uint64_t
return EFX_WORD_FIELD(class,
ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM) ==
ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
- PKT_RX_OUTER_L4_CKSUM_GOOD : PKT_RX_OUTER_L4_CKSUM_BAD;
+ RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD : RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
}
static uint32_t
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- *ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ *ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- *ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ *ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
break;
case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- *ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ *ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
break;
case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- *ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ *ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- *ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ *ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
ptype |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
if ((rxq->flags & SFC_EF100_RXQ_RSS_HASH) &&
EFX_TEST_OWORD_BIT(rx_prefix[0],
ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN)) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
/* EFX_OWORD_FIELD converts little-endian to CPU */
m->hash.rss = EFX_OWORD_FIELD(rx_prefix[0],
ESF_GZ_RX_PREFIX_RSS_HASH);
user_mark = EFX_OWORD_FIELD(rx_prefix[0],
ESF_GZ_RX_PREFIX_USER_MARK);
if (user_mark != SFC_EF100_USER_MARK_INVALID) {
- ol_flags |= PKT_RX_FDIR_ID;
+ ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
m->hash.fdir.hi = user_mark;
}
}
sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
struct rte_mbuf *m)
{
- size_t header_len = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ size_t header_len = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
m->outer_l2_len + m->outer_l3_len : 0) +
m->l2_len + m->l3_len + m->l4_len;
size_t payload_len = m->pkt_len - header_len;
unsigned int nb_payload_descs;
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
- switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+ switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
case 0:
/* FALLTHROUGH */
- case PKT_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
/* FALLTHROUGH */
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
break;
default:
return ENOTSUP;
* pseudo-header checksum which is calculated below,
* but requires contiguous packet headers.
*/
- if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
- (m->ol_flags & PKT_TX_L4_MASK)) {
+ if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+ (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)) {
calc_phdr_cksum = true;
max_nb_header_segs = 1;
- } else if (m->ol_flags & PKT_TX_TCP_SEG) {
+ } else if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
max_nb_header_segs = txq->tso_max_nb_header_descs;
}
break;
}
- if (m->ol_flags & PKT_TX_TCP_SEG) {
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
ret = sfc_ef100_tx_prepare_pkt_tso(txq, m);
if (unlikely(ret != 0)) {
rte_errno = ret;
* and does not require any assistance.
*/
ret = rte_net_intel_cksum_flags_prepare(m,
- m->ol_flags & ~PKT_TX_IP_CKSUM);
+ m->ol_flags & ~RTE_MBUF_F_TX_IP_CKSUM);
if (unlikely(ret != 0)) {
rte_errno = -ret;
break;
uint8_t inner_l3;
switch (tx_tunnel) {
- case PKT_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN;
break;
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE;
break;
default:
uint16_t part_cksum_w;
uint16_t l4_offset_w;
- if ((m->ol_flags & PKT_TX_TUNNEL_MASK) == 0) {
- outer_l3 = (m->ol_flags & PKT_TX_IP_CKSUM);
- outer_l4 = (m->ol_flags & PKT_TX_L4_MASK);
+ if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) == 0) {
+ outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
+ outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_L4_MASK);
inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
part_cksum_w = 0;
l4_offset_w = 0;
} else {
- outer_l3 = (m->ol_flags & PKT_TX_OUTER_IP_CKSUM);
- outer_l4 = (m->ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
+ outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(m->ol_flags &
- PKT_TX_TUNNEL_MASK);
+ RTE_MBUF_F_TX_TUNNEL_MASK);
- switch (m->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP;
part_cksum_w = offsetof(struct rte_tcp_hdr, cksum) >> 1;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP;
part_cksum_w = offsetof(struct rte_udp_hdr,
dgram_cksum) >> 1;
ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
efx_oword_t tx_desc_extra_fields;
EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
*/
int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
uint8_t inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(
- m->ol_flags & PKT_TX_TUNNEL_MASK);
+ m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
EFX_POPULATE_OWORD_10(*tx_desc,
ESF_GZ_TX_TSO_MSS, m->tso_segsz,
EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);
#define SFC_MBUF_SEG_LEN_MAX UINT16_MAX
RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
- if (m->ol_flags & PKT_TX_TCP_SEG) {
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* Tx TSO descriptor */
extra_descs++;
/*
size_t header_len;
size_t remaining_hdr_len;
- if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
outer_iph_off = m->outer_l2_len;
outer_udph_off = outer_iph_off + m->outer_l3_len;
} else {
break;
}
- if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
m_seg = sfc_ef100_xmit_tso_pkt(txq, m_seg, &added);
} else {
id = added++ & txq->ptr_mask;
rte_pktmbuf_data_len(m) = pkt_len;
m->ol_flags |=
- (PKT_RX_RSS_HASH *
+ (RTE_MBUF_F_RX_RSS_HASH *
!!EFX_TEST_QWORD_BIT(*qwordp,
ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
- (PKT_RX_FDIR_ID *
+ (RTE_MBUF_F_RX_FDIR_ID *
!!EFX_TEST_QWORD_BIT(*qwordp,
ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
- (PKT_RX_FDIR *
+ (RTE_MBUF_F_RX_FDIR *
!!EFX_TEST_QWORD_BIT(*qwordp,
ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
/* Mask RSS hash offload flag if RSS is not enabled */
sfc_ef10_rx_ev_to_offloads(rx_ev, m,
(rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
- ~0ull : ~PKT_RX_RSS_HASH);
+ ~0ull : ~RTE_MBUF_F_RX_RSS_HASH);
/* data_off already moved past pseudo header */
pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* Always get RSS hash from pseudo header to avoid
* condition/branching. If it is valid or not depends on
- * PKT_RX_RSS_HASH in m->ol_flags.
+ * RTE_MBUF_F_RX_RSS_HASH in m->ol_flags.
*/
m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
/*
* Always get RSS hash from pseudo header to avoid
* condition/branching. If it is valid or not depends on
- * PKT_RX_RSS_HASH in m->ol_flags.
+ * RTE_MBUF_F_RX_RSS_HASH in m->ol_flags.
*/
m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
uint64_t ol_mask)
{
uint32_t tun_ptype = 0;
- /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
+ /* Which event bit is mapped to RTE_MBUF_F_RX_IP_CKSUM_* */
int8_t ip_csum_err_bit;
- /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
+ /* Which event bit is mapped to RTE_MBUF_F_RX_L4_CKSUM_* */
int8_t l4_csum_err_bit;
uint32_t l2_ptype = 0;
uint32_t l3_ptype = 0;
l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
ESF_DZ_RX_IPCKSUM_ERR_LBN)))
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
}
switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
case ESE_DZ_L3_CLASS_IP4:
l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- ol_flags |= PKT_RX_RSS_HASH |
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH |
((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
- PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
break;
case ESE_DZ_L3_CLASS_IP6_FRAG:
l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
case ESE_DZ_L3_CLASS_IP6:
l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
break;
case ESE_DZ_L3_CLASS_ARP:
/* Override Layer 2 packet type */
RTE_PTYPE_INNER_L4_TCP;
ol_flags |=
(EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
- PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
case ESE_FZ_L4_CLASS_UDP:
RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP);
RTE_PTYPE_INNER_L4_UDP;
ol_flags |=
(EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
- PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
case ESE_FZ_L4_CLASS_UNKNOWN:
RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN !=
* the size limit. Perform the check in debug mode since MTU
* more than 9k is not supported, but the limit here is 16k-1.
*/
- if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
struct rte_mbuf *m_seg;
for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
unsigned int *added, unsigned int *dma_desc_space,
bool *reap_done)
{
- size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ size_t iph_off = ((m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
m_seg->l2_len;
size_t tcph_off = iph_off + m_seg->l3_len;
*
* The same concern applies to outer UDP datagram length field.
*/
- switch (m_seg->ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_VXLAN:
+ switch (m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
/* FALLTHROUGH */
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
sfc_tso_outer_udp_fix_len(first_m_seg, hdr_addr);
break;
default:
* filled in the TSO mbuf. Use zero IPID if there is no IPv4 flag.
* If the packet is still IPv4, HW will simply start from zero IPID.
*/
- if (first_m_seg->ol_flags & PKT_TX_IPV4)
+ if (first_m_seg->ol_flags & RTE_MBUF_F_TX_IPV4)
packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
- if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+ if (first_m_seg->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
first_m_seg->outer_l2_len);
if (likely(pktp + 1 != pktp_end))
rte_mbuf_prefetch_part1(pktp[1]);
- if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
int rc;
rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
/* ef10_simple does not support TSO and VLAN insertion */
if (unlikely(m->ol_flags &
- (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
+ (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_VLAN_PKT))) {
rte_errno = ENOTSUP;
break;
}
switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
- mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case EFX_PKT_IPV4:
- mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
default:
- RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
- SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
- PKT_RX_IP_CKSUM_UNKNOWN);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
+ RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN);
break;
}
(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
- mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
case EFX_PKT_TCP:
case EFX_PKT_UDP:
- mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
break;
default:
- RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
- SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
- PKT_RX_L4_CKSUM_UNKNOWN);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
+ RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN);
break;
}
EFX_RX_HASHALG_TOEPLITZ,
mbuf_data);
- m->ol_flags |= PKT_RX_RSS_HASH;
+ m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
* IPv4 flag. If the packet is still IPv4, HW will simply start from
* zero IPID.
*/
- if (m->ol_flags & PKT_TX_IPV4)
+ if (m->ol_flags & RTE_MBUF_F_TX_IPV4)
packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off);
/* Handle TCP header */
size_t field_ofst;
rte_be16_t len;
- if (m->ol_flags & PKT_TX_IPV4) {
+ if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
field_ofst = offsetof(struct rte_ipv4_hdr, total_length);
len = rte_cpu_to_be_16(m->l3_len + ip_payload_len);
} else {
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
efx_desc_t **pend)
{
- uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ uint16_t this_tag = ((m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) ?
m->vlan_tci : 0);
if (this_tag == txq->hw_vlan_tci)
*/
pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
- if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/*
* We expect 'pkt->l[2, 3, 4]_len' values to be
* set correctly by the caller
cksum = ~rte_raw_cksum(iph, l3_len);
mbuf->ol_flags |= cksum ?
- PKT_RX_IP_CKSUM_BAD :
- PKT_RX_IP_CKSUM_GOOD;
+ RTE_MBUF_F_RX_IP_CKSUM_BAD :
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else if (l3 == RTE_PTYPE_L3_IPV6) {
struct rte_ipv6_hdr *iph = l3_hdr;
* indicates that the sender did not
* generate one [RFC 768].
*/
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
return;
}
}
l4_hdr);
}
mbuf->ol_flags |= cksum_ok ?
- PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
{
void *l3_hdr = packet + l2_len;
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
struct rte_ipv4_hdr *iph = l3_hdr;
uint16_t cksum;
cksum = rte_raw_cksum(iph, l3_len);
iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
}
- if (ol_flags & PKT_TX_L4_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
void *l4_hdr;
l4_hdr = packet + l2_len + l3_len;
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
*l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
- else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+ else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
*l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
else
return;
**l4_cksum = 0;
- if (ol_flags & PKT_TX_IPV4)
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
*l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
else
*l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
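Seeding the L4 checksum field with the pseudo-header checksum lets the rest of the sum be accumulated over the L4 header and payload. For the contiguous single-segment case, DPDK's stock helper does the whole job; a minimal IPv4/UDP sketch:
/* Hedged sketch: full software UDP checksum, contiguous IPv4 packet. */
static inline void sw_udp4_cksum(struct rte_ipv4_hdr *ip4,
				 struct rte_udp_hdr *udp)
{
	udp->dgram_cksum = 0;
	udp->dgram_cksum = rte_ipv4_udptcp_cksum(ip4, udp);
}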
nb_segs = mbuf->nb_segs;
if (txq->csum &&
- ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
+ ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
+ (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
is_cksum = 1;
/* Support only packets with at least layer 4
uint16_t hdrs_len;
uint64_t tso;
- tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+ tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
if (tso) {
struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
/* TCP segmentation implies TCP checksum offload */
- mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+ mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
/* gso size is calculated without RTE_ETHER_CRC_LEN */
hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
if (unlikely(ol_flags)) {
/* L4 cksum */
- uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;
- if (l4_flags == PKT_TX_TCP_CKSUM)
+ uint64_t l4_flags = ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (l4_flags == RTE_MBUF_F_TX_TCP_CKSUM)
sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
- else if (l4_flags == PKT_TX_UDP_CKSUM)
+ else if (l4_flags == RTE_MBUF_F_TX_UDP_CKSUM)
sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
else
sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;
sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;
/* L3 cksum */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
sqe.hdr.csum_l3 = 1;
}
nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
{
static const uint64_t flag_table[3] __rte_cache_aligned = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
};
const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
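The index built from the CQE error opcode selects one of the three precomputed flag combinations without branching: 0 for no error, 1 for an IP checksum error, 2 for an L4 checksum error. A hedged sketch of the complete lookup (the tail of the index expression is elided above and assumed here):
const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
		    (cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);

return flag_table[idx];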
{
if (likely(cqe_rx_w0.rss_alg)) {
pkt->hash.rss = cqe_rx_w2.rss_tag;
- pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
if (unlikely(cqe_rx_w0.vlan_stripped)) {
- pkt->ol_flags |= PKT_RX_VLAN
- | PKT_RX_VLAN_STRIPPED;
+ pkt->ol_flags |= RTE_MBUF_F_RX_VLAN
+ | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci =
rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
}
pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
if (unlikely(cqe_rx_w0.vlan_stripped)) {
- pkt->ol_flags |= PKT_RX_VLAN
- | PKT_RX_VLAN_STRIPPED;
+ pkt->ol_flags |= RTE_MBUF_F_RX_VLAN
+ | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
}
}
#define NICVF_RX_OFFLOAD_CKSUM 0x2
#define NICVF_RX_OFFLOAD_VLAN_STRIP 0x4
-#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
+#define NICVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK)
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
static inline uint16_t __attribute__((const))
rxq = dev->data->rx_queues[queue];
if (on) {
- rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
} else {
- rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
}
}
#include "txgbe_rxtx.h"
#ifdef RTE_LIBRTE_IEEE1588
-#define TXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define TXGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define TXGBE_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits required for building TX context */
-static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
- PKT_TX_OUTER_IPV6 |
- PKT_TX_OUTER_IPV4 |
- PKT_TX_IPV6 |
- PKT_TX_IPV4 |
- PKT_TX_VLAN_PKT |
- PKT_TX_L4_MASK |
- PKT_TX_TCP_SEG |
- PKT_TX_TUNNEL_MASK |
- PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_OUTER_UDP_CKSUM |
+static const u64 TXGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
+ RTE_MBUF_F_TX_OUTER_IPV6 |
+ RTE_MBUF_F_TX_OUTER_IPV4 |
+ RTE_MBUF_F_TX_IPV6 |
+ RTE_MBUF_F_TX_IPV4 |
+ RTE_MBUF_F_TX_VLAN_PKT |
+ RTE_MBUF_F_TX_L4_MASK |
+ RTE_MBUF_F_TX_TCP_SEG |
+ RTE_MBUF_F_TX_TUNNEL_MASK |
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_OUTER_UDP_CKSUM |
#ifdef RTE_LIB_SECURITY
- PKT_TX_SEC_OFFLOAD |
+ RTE_MBUF_F_TX_SEC_OFFLOAD |
#endif
TXGBE_TX_IEEE1588_TMST);
#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
/*
* Prefetch a cache line into all cache levels.
type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
/* check if TCP segmentation required for this packet */
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
tx_offload_mask.l4_len |= ~0;
mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
} else { /* no TSO, check if hardware checksum is needed */
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
}
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
mss_l4len_idx |=
TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
mss_l4len_idx |=
TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
mss_l4len_idx |=
TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
tx_offload_mask.l2_len |= ~0;
vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
- if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
tx_offload_mask.outer_tun_len |= ~0;
tx_offload_mask.outer_l2_len |= ~0;
tx_offload_mask.outer_l3_len |= ~0;
tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
- switch (ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_IPIP:
+ switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 0b */
break;
- case PKT_TX_TUNNEL_VXLAN:
- case PKT_TX_TUNNEL_VXLAN_GPE:
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
break;
default:
vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
}
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
tx_offload_mask.vlan_tci |= ~0;
vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
}
#ifdef RTE_LIB_SECURITY
- if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
union txgbe_crypto_tx_desc_md *md =
(union txgbe_crypto_tx_desc_md *)mdata;
tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
{
uint32_t tmp = 0;
- if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
tmp |= TXGBE_TXD_CC;
tmp |= TXGBE_TXD_L4CS;
}
- if (ol_flags & PKT_TX_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
tmp |= TXGBE_TXD_CC;
tmp |= TXGBE_TXD_IPCS;
}
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
tmp |= TXGBE_TXD_CC;
tmp |= TXGBE_TXD_EIPCS;
}
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
tmp |= TXGBE_TXD_CC;
/* implies IPv4 cksum */
- if (ol_flags & PKT_TX_IPV4)
+ if (ol_flags & RTE_MBUF_F_TX_IPV4)
tmp |= TXGBE_TXD_IPCS;
tmp |= TXGBE_TXD_L4CS;
}
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
tmp |= TXGBE_TXD_CC;
return tmp;
{
uint32_t cmdtype = 0;
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT)
cmdtype |= TXGBE_TXD_VLE;
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cmdtype |= TXGBE_TXD_TSE;
- if (ol_flags & PKT_TX_MACSEC)
+ if (ol_flags & RTE_MBUF_F_TX_MACSEC)
cmdtype |= TXGBE_TXD_LINKSEC;
return cmdtype;
}
return txgbe_encode_ptype(ptype);
/* Only support flags in TXGBE_TX_OFFLOAD_MASK */
- tun = !!(oflags & PKT_TX_TUNNEL_MASK);
+ tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK);
/* L2 level */
ptype = RTE_PTYPE_L2_ETHER;
- if (oflags & PKT_TX_VLAN)
+ if (oflags & RTE_MBUF_F_TX_VLAN)
ptype |= RTE_PTYPE_L2_ETHER_VLAN;
/* L3 level */
- if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
+ if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
ptype |= RTE_PTYPE_L3_IPV4;
- else if (oflags & (PKT_TX_OUTER_IPV6))
+ else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6))
ptype |= RTE_PTYPE_L3_IPV6;
- if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
+ if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
- else if (oflags & (PKT_TX_IPV6))
+ else if (oflags & (RTE_MBUF_F_TX_IPV6))
ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
/* L4 level */
- switch (oflags & (PKT_TX_L4_MASK)) {
- case PKT_TX_TCP_CKSUM:
+ switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
break;
}
- if (oflags & PKT_TX_TCP_SEG)
+ if (oflags & RTE_MBUF_F_TX_TCP_SEG)
ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
/* Tunnel */
- switch (oflags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_VXLAN:
+ switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
ptype |= RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_TUNNEL_VXLAN;
ptype |= RTE_PTYPE_INNER_L2_ETHER;
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
ptype |= RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_TUNNEL_GRE;
ptype |= RTE_PTYPE_INNER_L2_ETHER;
break;
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
ptype |= RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_TUNNEL_GENEVE;
ptype |= RTE_PTYPE_INNER_L2_ETHER;
break;
- case PKT_TX_TUNNEL_VXLAN_GPE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
ptype |= RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_TUNNEL_VXLAN_GPE;
break;
- case PKT_TX_TUNNEL_IPIP:
- case PKT_TX_TUNNEL_IP:
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
+ case RTE_MBUF_F_TX_TUNNEL_IP:
ptype |= RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_TUNNEL_IP;
const struct txgbe_genevehdr *gh;
uint8_t tun_len;
- switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
- case PKT_TX_TUNNEL_IPIP:
+ switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+ case RTE_MBUF_F_TX_TUNNEL_IPIP:
tun_len = 0;
break;
- case PKT_TX_TUNNEL_VXLAN:
- case PKT_TX_TUNNEL_VXLAN_GPE:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
tun_len = sizeof(struct txgbe_udphdr)
+ sizeof(struct txgbe_vxlanhdr);
break;
- case PKT_TX_TUNNEL_GRE:
+ case RTE_MBUF_F_TX_TUNNEL_GRE:
tun_len = sizeof(struct txgbe_nvgrehdr);
break;
- case PKT_TX_TUNNEL_GENEVE:
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE:
gh = rte_pktmbuf_read(mbuf,
mbuf->outer_l2_len + mbuf->outer_l3_len,
sizeof(genevehdr), &genevehdr);
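rte_pktmbuf_read() is used here because the outer headers may straddle mbuf segments: it returns a direct pointer when the span is contiguous, copies into the caller's buffer otherwise, and returns NULL if the packet is too short. A minimal usage sketch (offset expression assumed):
/* Hedged sketch: segment-safe read of a possibly split header. */
struct rte_vxlan_hdr vx_copy;
const struct rte_vxlan_hdr *vx;

vx = rte_pktmbuf_read(mbuf,
		      mbuf->outer_l2_len + mbuf->outer_l3_len +
		      sizeof(struct rte_udp_hdr),
		      sizeof(vx_copy), &vx_copy);
if (vx == NULL)
	return; /* packet shorter than the requested span */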
*/
ol_flags = tx_pkt->ol_flags;
#ifdef RTE_LIB_SECURITY
- use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+ use_ipsec = txq->using_ipsec && (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
#endif
/* If hardware offload required */
cmd_type_len = TXGBE_TXD_FCS;
#ifdef RTE_LIBRTE_IEEE1588
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= TXGBE_TXD_1588;
#endif
olinfo_status = 0;
if (tx_ol_req) {
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* when TSO is on, paylen in the descriptor is
* not the packet len but the tcp payload len
*/
pkt_len -= (tx_offload.l2_len +
tx_offload.l3_len + tx_offload.l4_len);
pkt_len -=
- (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
+ (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
? tx_offload.outer_l2_len +
tx_offload.outer_l3_len : 0;
}
txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
{
static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
- 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, 0, 0,
- 0, 0, 0, PKT_RX_FDIR,
+ 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+ 0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+ RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, RTE_MBUF_F_RX_FDIR,
};
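ip_rss_types_map is indexed by the low four bits of the descriptor's packet-info word, turning the reported RSS/FDIR type into mbuf flags with a single load. A minimal sketch of the lookup (index extraction assumed):
/* Hedged sketch: pkt_info nibble -> Rx ol_flags, branch-free. */
static inline uint64_t
txgbe_rss_flags_from_pkt_info(uint32_t pkt_info)
{
	return ip_rss_types_map[pkt_info & 0xF];
}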
#ifdef RTE_LIBRTE_IEEE1588
static uint64_t ip_pkt_etqf_map[8] = {
- 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
* That can be found from rte_eth_rxmode.offloads flag
*/
pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
- vlan_flags & PKT_RX_VLAN_STRIPPED)
+ vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
? vlan_flags : 0;
#ifdef RTE_LIBRTE_IEEE1588
if (rx_status & TXGBE_RXD_STAT_1588)
- pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+ pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
return pkt_flags;
}
/* checksum offload can't be disabled */
if (rx_status & TXGBE_RXD_STAT_IPCS) {
pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
- ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ ? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
}
if (rx_status & TXGBE_RXD_STAT_L4CS) {
pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
- ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
+ ? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
}
if (rx_status & TXGBE_RXD_STAT_EIPCS &&
rx_status & TXGBE_RXD_ERR_EIPCS) {
- pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
}
#ifdef RTE_LIB_SECURITY
if (rx_status & TXGBE_RXD_STAT_SECP) {
- pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
if (rx_status & TXGBE_RXD_ERR_SECERR)
- pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
}
#endif
txgbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
rxq->pkt_type_mask);
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
mb->hash.rss =
rte_le_to_cpu_32(rxdp[j].qw0.dw1);
- else if (pkt_flags & PKT_RX_FDIR) {
+ else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
mb->hash.fdir.hash =
rte_le_to_cpu_16(rxdp[j].qw0.hi.csum) &
TXGBE_ATR_HASH_MASK;
rxm->port = rxq->port_id;
pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
- /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
pkt_flags = rx_desc_status_to_pkt_flags(staterr,
rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
rxq->pkt_type_mask);
- if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) {
rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
- } else if (pkt_flags & PKT_RX_FDIR) {
+ } else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
rxm->hash.fdir.hash =
rte_le_to_cpu_16(rxd.qw0.hi.csum) &
TXGBE_ATR_HASH_MASK;
head->port = rxq->port_id;
- /* The vlan_tci field is only valid when PKT_RX_VLAN is
+ /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
rxq->pkt_type_mask);
- if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
+ if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) {
head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
- } else if (pkt_flags & PKT_RX_FDIR) {
+ } else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
& TXGBE_ATR_HASH_MASK;
head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
struct rte_mbuf *m = bufs[i];
/* Do VLAN tag insertion */
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
int error = rte_vlan_insert(&m);
if (unlikely(error)) {
rte_pktmbuf_free(m);
if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
return 0;
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
m->packet_type = ptype;
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
if (hdr->csum_start <= hdrlen && l4_supported) {
- m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
} else {
/* Unknown proto or tunnel, do sw cksum. We can assume
* the cksum field is in the first segment since the
off) = csum;
}
} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
- m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
/* GSO request, save required information in mbuf */
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
- m->ol_flags |= PKT_RX_LRO | \
- PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | \
+ RTE_MBUF_F_RX_L4_CKSUM_NONE;
break;
default:
return -EINVAL;
#endif
/* Do VLAN tag insertion */
- if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN_PKT)) {
error = rte_vlan_insert(&m);
/* rte_vlan_insert() may change pointer
* even in the case of failure
break;
}
- if (m->ol_flags & PKT_TX_TCP_SEG)
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
virtio_tso_fix_cksum(m);
}
return 0;
/* GSO not support in vec path, skip check */
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
m->packet_type = ptype;
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
if (hdr->csum_start <= hdrlen && l4_supported) {
- m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
} else {
/* Unknown proto or tunnel, do sw cksum. We can assume
* the cksum field is in the first segment since the
off) = csum;
}
} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
- m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
return 0;
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
{
- uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+ uint64_t csum_l4 = cookie->ol_flags & RTE_MBUF_F_TX_L4_MASK;
- if (cookie->ol_flags & PKT_TX_TCP_SEG)
- csum_l4 |= PKT_TX_TCP_CKSUM;
+ if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
switch (csum_l4) {
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
}
/* TCP Segmentation Offload */
- if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ hdr->gso_type = (cookie->ol_flags & RTE_MBUF_F_TX_IPV6) ?
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
hdr->gso_size = cookie->tso_segsz;
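For the header population above to take effect, the sender must have filled the TSO-related mbuf fields before calling tx_burst. A hedged sketch of that setup (header sizes assumed: no VLAN, no IP options, no TCP options; MSS value illustrative):
/* Hedged sketch: mbuf fields an application sets to request TSO. */
m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
	       RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG;
m->l2_len = sizeof(struct rte_ether_hdr);
m->l3_len = sizeof(struct rte_ipv4_hdr);
m->l4_len = sizeof(struct rte_tcp_hdr);
m->tso_segsz = 1448; /* MSS, illustrative */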
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
-#define VMXNET3_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+#define VMXNET3_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
/* Non-TSO packet cannot occupy more than
* VMXNET3_MAX_TXD_PER_PKT TX descriptors.
*/
- if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 &&
m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
rte_errno = EINVAL;
return i;
/* check that only supported TX offloads are requested. */
if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
- (ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_SCTP_CKSUM) {
+ (ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_SCTP_CKSUM) {
rte_errno = ENOTSUP;
return i;
}
struct rte_mbuf *txm = tx_pkts[nb_tx];
struct rte_mbuf *m_seg = txm;
int copy_size = 0;
- bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+ bool tso = (txm->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0;
/* # of descriptors needed for a packet. */
unsigned count = txm->nb_segs;
/* Add VLAN tag if present */
gdesc = txq->cmd_ring.base + first2fill;
- if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ if (txm->ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
gdesc->txd.ti = 1;
gdesc->txd.tci = txm->vlan_tci;
}
gdesc->txd.msscof = mss;
deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
- } else if (txm->ol_flags & PKT_TX_L4_MASK) {
+ } else if (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
gdesc->txd.om = VMXNET3_OM_CSUM;
gdesc->txd.hlen = txm->l2_len + txm->l3_len;
- switch (txm->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
gdesc->txd.msscof = gdesc->txd.hlen +
offsetof(struct rte_tcp_hdr, cksum);
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
gdesc->txd.msscof = gdesc->txd.hlen +
offsetof(struct rte_udp_hdr,
dgram_cksum);
break;
default:
PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
- txm->ol_flags & PKT_TX_L4_MASK);
+ txm->ol_flags & RTE_MBUF_F_TX_L4_MASK);
abort();
}
deferred++;
rxm->tso_segsz = rcde->mss;
*vmxnet3_segs_dynfield(rxm) = rcde->segCnt;
- ol_flags |= PKT_RX_LRO;
+ ol_flags |= RTE_MBUF_F_RX_LRO;
}
} else { /* Offloads set in eop */
/* Check for RSS */
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rxm->hash.rss = rcd->rssHash;
}
/* Check for hardware stripped VLAN tag */
if (rcd->ts) {
- ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
}
/* Check packet type, checksum errors, etc. */
if (rcd->cnc) {
- ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
} else {
if (rcd->v4) {
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
if (rcd->ipc)
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (rcd->tuc) {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (rcd->tcp)
packet_type |= RTE_PTYPE_L4_TCP;
else
} else {
if (rcd->tcp) {
packet_type |= RTE_PTYPE_L4_TCP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (rcd->udp) {
packet_type |= RTE_PTYPE_L4_UDP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
} else if (rcd->v6) {
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
if (rcd->tuc) {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (rcd->tcp)
packet_type |= RTE_PTYPE_L4_TCP;
else
} else {
if (rcd->tcp) {
packet_type |= RTE_PTYPE_L4_TCP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (rcd->udp) {
packet_type |= RTE_PTYPE_L4_UDP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
} else {
}
/* Old variants of vmxnet3 do not provide MSS */
- if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+ if ((ol_flags & RTE_MBUF_F_RX_LRO) && rxm->tso_segsz == 0)
rxm->tso_segsz = vmxnet3_guess_mss(hw,
rcd, rxm);
}
return lkey;
/* Take slower bottom-half on miss. */
return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+ !!(mbuf->ol_flags & RTE_MBUF_F_EXTERNAL));
}
* eBPF program sample.
* Accepts pointer to struct rte_mbuf as an input parameter.
* cleanup mbuf's vlan_tci and all related RX flags
- * (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED).
+ * (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED).
* Doesn't touch contents of packet data.
* To compile:
* clang -O2 -target bpf -Wno-int-to-void-pointer-cast -c t2.c
mb = pkt;
mb->vlan_tci = 0;
- mb->ol_flags &= ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ mb->ol_flags &= ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
return 1;
}
rte_pktmbuf_free(m);
/* request HW to regenerate IPv4 cksum */
- ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+ ol_flags |= (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM);
/* If we fail to fragment the packet */
if (unlikely (len2 < 0))
}
/* update offloading flags */
- m->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+ m->ol_flags |= (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM);
}
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
- if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
- if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+ if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
+ if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
else
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
(type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
- m->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
} else {
RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
* with the security session.
*/
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD &&
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
rte_security_dynfield_is_registered()) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
ip->ip_sum = 0;
/* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
}
/* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
free_pkts(&m, 1);
continue;
}
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
} else {
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
} else {
qconf->tx_queue_id[portid] = tx_queueid;
/* Pre-populate pkt offloads based on capabilities */
- qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
- qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
+ qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
+ qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
- qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
+ qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
tx_queueid++;
switch (type) {
case PKT_TYPE_PLAIN_IPV4:
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
if (unlikely(pkt->ol_flags &
- PKT_RX_SEC_OFFLOAD_FAILED)) {
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
RTE_LOG(ERR, IPSEC,
"Inbound security offload failed\n");
goto drop_pkt_and_exit;
break;
case PKT_TYPE_PLAIN_IPV6:
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
if (unlikely(pkt->ol_flags &
- PKT_RX_SEC_OFFLOAD_FAILED)) {
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
RTE_LOG(ERR, IPSEC,
"Inbound security offload failed\n");
goto drop_pkt_and_exit;
sess->security.ses;
/* Mark the packet for Tx security offload */
- pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
/* Get the port to which this pkt need to be submitted */
port_id = sa->portid;
sess_tbl[port_id];
/* Mark the packet for Tx security offload */
- pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
}
/*
#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
-#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
+#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
struct supported_cipher_algo {
const char *keyword;
sizeof(struct clock_id));
/* Enable flag for hardware timestamping. */
- created_pkt->ol_flags |= PKT_TX_IEEE1588_TMST;
+ created_pkt->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
/*Read value from NIC to prevent latching with old value. */
rte_eth_timesync_read_tx_timestamp(ptp_data->portid,
if (likely(nb_rx == 0))
continue;
- if (m->ol_flags & PKT_RX_IEEE1588_PTP)
+ if (m->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP)
parse_ptp_frames(portid, m);
rte_pktmbuf_free(m);
* Packet RX/TX
*
***/
#define PKT_RX_BURST_MAX 32
#define PKT_TX_BURST_MAX 32
#define TIME_TX_DRAIN 200000ULL
static uint16_t port_rx;
static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;
struct rte_meter_srtcm_params app_srtcm_params = {
}
/* Read packet burst from NIC RX */
nb_rx = rte_eth_rx_burst(port_rx, NIC_RX_QUEUE, pkts_rx, PKT_RX_BURST_MAX);
/* Handle packets */
for (i = 0; i < nb_rx; i ++) {
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
tx_buffer = rte_zmalloc_socket("tx_buffer",
RTE_ETH_TX_BUFFER_SIZE(PKT_TX_BURST_MAX), 0,
rte_eth_dev_socket_id(port_tx));
if (tx_buffer == NULL)
rte_exit(EXIT_FAILURE, "Port %d TX buffer allocation error\n",
port_tx);
rte_eth_tx_buffer_init(tx_buffer, PKT_TX_BURST_MAX);
ret = rte_eth_dev_start(port_rx);
if (ret < 0)
tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
m->l2_len + m->l3_len);
- m->ol_flags |= PKT_TX_TCP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
- m->ol_flags |= PKT_TX_IPV4;
- m->ol_flags |= PKT_TX_IP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV4;
+ m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
} else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
- m->ol_flags |= PKT_TX_IPV6;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV6;
tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
}
}
(vh->vlan_tci != vlan_tag_be))
vh->vlan_tci = vlan_tag_be;
} else {
- m->ol_flags |= PKT_TX_VLAN_PKT;
+ m->ol_flags |= RTE_MBUF_F_TX_VLAN_PKT;
/*
* Find the right seg to adjust the data len when offset is
m->vlan_tci = vlan_tag;
}
- if (m->ol_flags & PKT_RX_LRO)
+ if (m->ol_flags & RTE_MBUF_F_RX_LRO)
virtio_tx_offload(m);
tx_q->m_table[tx_q->len++] = m;
#define DEV_TX_OFFLOAD_SECURITY 0x00020000
/**
* Device supports generic UDP tunneled packet TSO.
- * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
+ * Application must set RTE_MBUF_F_TX_TUNNEL_UDP and other mbuf fields required
* for tunnel TSO.
*/
#define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
/**
* Device supports generic IP tunneled packet TSO.
- * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
+ * Application must set RTE_MBUF_F_TX_TUNNEL_IP and other mbuf fields required
* for tunnel TSO.
*/
#define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
* RTE_FLOW_ITEM_TYPE_META
*
* Matches a specified metadata value. On egress, metadata can be set
- * either by mbuf dynamic metadata field with PKT_TX_DYNF_METADATA flag or
+ * either by mbuf dynamic metadata field with RTE_MBUF_F_TX_DYNF_METADATA flag or
* RTE_FLOW_ACTION_TYPE_SET_META. On ingress, RTE_FLOW_ACTION_TYPE_SET_META
* sets metadata for a packet and the metadata will be reported via mbuf
- * metadata dynamic field with PKT_RX_DYNF_METADATA flag. The dynamic mbuf
+ * metadata dynamic field with RTE_MBUF_F_RX_DYNF_METADATA flag. The dynamic mbuf
* field must be registered in advance by rte_flow_dynf_metadata_register().
*/
struct rte_flow_item_meta {
RTE_FLOW_ACTION_TYPE_JUMP,
/**
- * Attaches an integer value to packets and sets PKT_RX_FDIR and
- * PKT_RX_FDIR_ID mbuf flags.
+ * Attaches an integer value to packets and sets RTE_MBUF_F_RX_FDIR and
+ * RTE_MBUF_F_RX_FDIR_ID mbuf flags.
*
* See struct rte_flow_action_mark.
*/
/**
* Flags packets. Similar to MARK without a specific value; only
- * sets the PKT_RX_FDIR mbuf flag.
+ * sets the RTE_MBUF_F_RX_FDIR mbuf flag.
*
* No associated configuration structure.
*/
/**
* RTE_FLOW_ACTION_TYPE_MARK
*
- * Attaches an integer value to packets and sets PKT_RX_FDIR and
- * PKT_RX_FDIR_ID mbuf flags.
+ * Attaches an integer value to packets and sets RTE_MBUF_F_RX_FDIR and
+ * RTE_MBUF_F_RX_FDIR_ID mbuf flags.
*
* This value is arbitrary and application-defined. Maximum allowed value
* depends on the underlying implementation. It is returned in the
* RTE_FLOW_ACTION_TYPE_SET_META
*
* Set metadata. Metadata set by mbuf metadata dynamic field with
- * PKT_TX_DYNF_DATA flag on egress will be overridden by this action. On
+ * RTE_MBUF_F_TX_DYNF_METADATA flag on egress will be overridden by this action. On
* ingress, the metadata will be carried by mbuf metadata dynamic field
- * with PKT_RX_DYNF_METADATA flag if set. The dynamic mbuf field must be
+ * with RTE_MBUF_F_RX_DYNF_METADATA flag if set. The dynamic mbuf field must be
* registered in advance by rte_flow_dynf_metadata_register().
*
* Altering partial bits is supported with mask. For bits which have never
RTE_MBUF_DYNFIELD((m), rte_flow_dynf_metadata_offs, uint32_t *)
/* Mbuf dynamic flags for metadata. */
-#define PKT_RX_DYNF_METADATA (rte_flow_dynf_metadata_mask)
-#define PKT_TX_DYNF_METADATA (rte_flow_dynf_metadata_mask)
+#define RTE_MBUF_F_RX_DYNF_METADATA (rte_flow_dynf_metadata_mask)
+#define RTE_MBUF_F_TX_DYNF_METADATA (rte_flow_dynf_metadata_mask)
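For illustration, a minimal sketch (not part of this patch) of tagging an egress mbuf with metadata under the renamed flag; it assumes the application already called rte_flow_dynf_metadata_register() during init:

static int
tag_egress_metadata(struct rte_mbuf *m, uint32_t meta)
{
	/* the field must have been registered, else mask/offset are unset */
	if (!rte_flow_dynf_metadata_avail())
		return -1;
	*RTE_FLOW_DYNF_METADATA(m) = meta;	/* write the dynamic field */
	m->ol_flags |= RTE_MBUF_F_TX_DYNF_METADATA;	/* mark it valid */
	return 0;
}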
__rte_experimental
static inline uint32_t
uint16_t dropped;
if (!eth_rx_queue_info->ena_vector) {
- /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
- rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
+ /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
+ rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
for (i = 0; i < num; i++) {
m = mbufs[i];
#define TCP_HDR_PSH_MASK ((uint8_t)0x08)
#define TCP_HDR_FIN_MASK ((uint8_t)0x01)
-#define IS_IPV4_TCP(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4)) == \
- (PKT_TX_TCP_SEG | PKT_TX_IPV4))
-
-#define IS_IPV4_VXLAN_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
- PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_MASK)) == \
- (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
- PKT_TX_TUNNEL_VXLAN))
-
-#define IS_IPV4_VXLAN_UDP4(flag) (((flag) & (PKT_TX_UDP_SEG | PKT_TX_IPV4 | \
- PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_MASK)) == \
- (PKT_TX_UDP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
- PKT_TX_TUNNEL_VXLAN))
-
-#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
- PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_MASK)) == \
- (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
- PKT_TX_TUNNEL_GRE))
-
-#define IS_IPV4_UDP(flag) (((flag) & (PKT_TX_UDP_SEG | PKT_TX_IPV4)) == \
- (PKT_TX_UDP_SEG | PKT_TX_IPV4))
+#define IS_IPV4_TCP(flag) (((flag) & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4)) == \
+ (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4))
+
+#define IS_IPV4_VXLAN_TCP4(flag) (((flag) & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_TUNNEL_MASK)) == \
+ (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_TUNNEL_VXLAN))
+
+#define IS_IPV4_VXLAN_UDP4(flag) (((flag) & (RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_TUNNEL_MASK)) == \
+ (RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_TUNNEL_VXLAN))
+
+#define IS_IPV4_GRE_TCP4(flag) (((flag) & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_TUNNEL_MASK)) == \
+ (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_TUNNEL_GRE))
+
+#define IS_IPV4_UDP(flag) (((flag) & (RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_IPV4)) == \
+ (RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_IPV4))
/**
* Internal function which updates the UDP header of a packet, following
tail_idx = nb_segs - 1;
/* Only update UDP header for VxLAN packets. */
- update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
+ update_udp_hdr = (pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ? 1 : 0;
for (i = 0; i < nb_segs; i++) {
update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
return -EINVAL;
if (gso_ctx->gso_size >= pkt->pkt_len) {
- pkt->ol_flags &= (~(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG));
+ pkt->ol_flags &= (~(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG));
return 0;
}
(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
- pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ pkt->ol_flags &= (~RTE_MBUF_F_TX_TCP_SEG);
ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
} else if (IS_IPV4_VXLAN_UDP4(pkt->ol_flags) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
- pkt->ol_flags &= (~PKT_TX_UDP_SEG);
+ pkt->ol_flags &= (~RTE_MBUF_F_TX_UDP_SEG);
ret = gso_tunnel_udp4_segment(pkt, gso_size,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
} else if (IS_IPV4_TCP(pkt->ol_flags) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
- pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ pkt->ol_flags &= (~RTE_MBUF_F_TX_TCP_SEG);
ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
} else if (IS_IPV4_UDP(pkt->ol_flags) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
- pkt->ol_flags &= (~PKT_TX_UDP_SEG);
+ pkt->ol_flags &= (~RTE_MBUF_F_TX_UDP_SEG);
ret = gso_udp4_segment(pkt, gso_size, direct_pool,
indirect_pool, pkts_out, nb_pkts_out);
} else {
*
* Before calling rte_gso_segment(), applications must set proper ol_flags
* for the packet. The GSO library uses the same macros as that of TSO.
- * For example, set PKT_TX_TCP_SEG and PKT_TX_IPV4 in ol_flags to segment
- * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the PKT_TX_TCP_SEG
+ * For example, set RTE_MBUF_F_TX_TCP_SEG and RTE_MBUF_F_TX_IPV4 in ol_flags to segment
+ * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the RTE_MBUF_F_TX_TCP_SEG
* flag is removed for all GSO segments and the input packet.
*
* Each of the newly-created GSO segments is organized as a two-segment
trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
{
- if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
+ if ((mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) != 0 ||
tlen + hlen > mb->pkt_len)
return -EBADMSG;
/* reset mbuf packet type */
mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
- /* clear the PKT_RX_SEC_OFFLOAD flag if set */
- mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+ /* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
}
/*
mb->packet_type = RTE_PTYPE_UNKNOWN;
mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
- /* clear the PKT_RX_SEC_OFFLOAD flag if set */
- mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+ /* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
}
/*
icv_len = sa->icv_len;
for (i = 0; i != num; i++) {
- if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
+ if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
ml = rte_pktmbuf_lastseg(mb[i]);
/* remove high-order 32 bits of esn from packet len */
mb[i]->pkt_len -= sa->sqh_len;
ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
for (i = 0; i != num; i++) {
- mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
if (ol_flags != 0)
rte_security_set_pkt_metadata(ss->security.ctx,
ss->security.ses, mb[i], NULL);
j = num - n;
for (i = 0; j != 0 && i != num; i++) {
if (st[i] != 0) {
- mb[i]->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ mb[i]->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
j--;
}
}
* Take as input completed crypto ops, extract related mbufs
* and group them by rte_ipsec_session they belong to.
* For each mbuf whose crypto-op wasn't completed successfully,
- * PKT_RX_SEC_OFFLOAD_FAILED will be raised in ol_flags.
+ * RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED will be raised in ol_flags.
* Note that mbufs with undetermined SA (session-less) are not freed
* by the function, but are placed beyond mbufs for the last valid group.
* It is the user's responsibility to handle them further.
m = cop[i]->sym[0].m_src;
ns = cop[i]->sym[0].session;
- m->ol_flags |= PKT_RX_SEC_OFFLOAD;
+ m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
if (cop[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
- m->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
/* no valid session found */
if (ns == NULL) {
k = 0;
for (i = 0; i != num; i++) {
- if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
+ if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0)
k++;
else
dr[i - k] = i;
RTE_ASSERT(m->shinfo->fcb_opaque == m);
rte_mbuf_ext_refcnt_set(m->shinfo, 1);
- m->ol_flags = EXT_ATTACHED_MBUF;
+ m->ol_flags = RTE_MBUF_F_EXTERNAL;
if (m->next != NULL) {
m->next = NULL;
m->nb_segs = 1;
m->pool = mp;
m->nb_segs = 1;
m->port = RTE_MBUF_PORT_INVALID;
- m->ol_flags = EXT_ATTACHED_MBUF;
+ m->ol_flags = RTE_MBUF_F_EXTERNAL;
rte_mbuf_refcnt_set(m, 1);
m->next = NULL;
__rte_pktmbuf_copy_hdr(mc, m);
/* copied mbuf is not indirect or external */
- mc->ol_flags = m->ol_flags & ~(IND_ATTACHED_MBUF|EXT_ATTACHED_MBUF);
+ mc->ol_flags = m->ol_flags & ~(RTE_MBUF_F_INDIRECT|RTE_MBUF_F_EXTERNAL);
prev = &mc->next;
m_last = mc;
fprintf(f, " pkt_len=%u, ol_flags=%#"PRIx64", nb_segs=%u, port=%u",
m->pkt_len, m->ol_flags, m->nb_segs, m->port);
- if (m->ol_flags & (PKT_RX_VLAN | PKT_TX_VLAN))
+ if (m->ol_flags & (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_TX_VLAN))
fprintf(f, ", vlan_tci=%u", m->vlan_tci);
fprintf(f, ", ptype=%#"PRIx32"\n", m->packet_type);
const char *rte_get_rx_ol_flag_name(uint64_t mask)
{
switch (mask) {
- case PKT_RX_VLAN: return "PKT_RX_VLAN";
- case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
- case PKT_RX_FDIR: return "PKT_RX_FDIR";
- case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
- case PKT_RX_L4_CKSUM_GOOD: return "PKT_RX_L4_CKSUM_GOOD";
- case PKT_RX_L4_CKSUM_NONE: return "PKT_RX_L4_CKSUM_NONE";
- case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
- case PKT_RX_IP_CKSUM_GOOD: return "PKT_RX_IP_CKSUM_GOOD";
- case PKT_RX_IP_CKSUM_NONE: return "PKT_RX_IP_CKSUM_NONE";
- case PKT_RX_OUTER_IP_CKSUM_BAD: return "PKT_RX_OUTER_IP_CKSUM_BAD";
- case PKT_RX_VLAN_STRIPPED: return "PKT_RX_VLAN_STRIPPED";
- case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
- case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
- case PKT_RX_FDIR_ID: return "PKT_RX_FDIR_ID";
- case PKT_RX_FDIR_FLX: return "PKT_RX_FDIR_FLX";
- case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
- case PKT_RX_QINQ: return "PKT_RX_QINQ";
- case PKT_RX_LRO: return "PKT_RX_LRO";
- case PKT_RX_SEC_OFFLOAD: return "PKT_RX_SEC_OFFLOAD";
- case PKT_RX_SEC_OFFLOAD_FAILED: return "PKT_RX_SEC_OFFLOAD_FAILED";
- case PKT_RX_OUTER_L4_CKSUM_BAD: return "PKT_RX_OUTER_L4_CKSUM_BAD";
- case PKT_RX_OUTER_L4_CKSUM_GOOD: return "PKT_RX_OUTER_L4_CKSUM_GOOD";
- case PKT_RX_OUTER_L4_CKSUM_INVALID:
- return "PKT_RX_OUTER_L4_CKSUM_INVALID";
+ case RTE_MBUF_F_RX_VLAN: return "RTE_MBUF_F_RX_VLAN";
+ case RTE_MBUF_F_RX_RSS_HASH: return "RTE_MBUF_F_RX_RSS_HASH";
+ case RTE_MBUF_F_RX_FDIR: return "RTE_MBUF_F_RX_FDIR";
+ case RTE_MBUF_F_RX_L4_CKSUM_BAD: return "RTE_MBUF_F_RX_L4_CKSUM_BAD";
+ case RTE_MBUF_F_RX_L4_CKSUM_GOOD: return "RTE_MBUF_F_RX_L4_CKSUM_GOOD";
+ case RTE_MBUF_F_RX_L4_CKSUM_NONE: return "RTE_MBUF_F_RX_L4_CKSUM_NONE";
+ case RTE_MBUF_F_RX_IP_CKSUM_BAD: return "RTE_MBUF_F_RX_IP_CKSUM_BAD";
+ case RTE_MBUF_F_RX_IP_CKSUM_GOOD: return "RTE_MBUF_F_RX_IP_CKSUM_GOOD";
+ case RTE_MBUF_F_RX_IP_CKSUM_NONE: return "RTE_MBUF_F_RX_IP_CKSUM_NONE";
+ case RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD: return "RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD";
+ case RTE_MBUF_F_RX_VLAN_STRIPPED: return "RTE_MBUF_F_RX_VLAN_STRIPPED";
+ case RTE_MBUF_F_RX_IEEE1588_PTP: return "RTE_MBUF_F_RX_IEEE1588_PTP";
+ case RTE_MBUF_F_RX_IEEE1588_TMST: return "RTE_MBUF_F_RX_IEEE1588_TMST";
+ case RTE_MBUF_F_RX_FDIR_ID: return "RTE_MBUF_F_RX_FDIR_ID";
+ case RTE_MBUF_F_RX_FDIR_FLX: return "RTE_MBUF_F_RX_FDIR_FLX";
+ case RTE_MBUF_F_RX_QINQ_STRIPPED: return "RTE_MBUF_F_RX_QINQ_STRIPPED";
+ case RTE_MBUF_F_RX_QINQ: return "RTE_MBUF_F_RX_QINQ";
+ case RTE_MBUF_F_RX_LRO: return "RTE_MBUF_F_RX_LRO";
+ case RTE_MBUF_F_RX_SEC_OFFLOAD: return "RTE_MBUF_F_RX_SEC_OFFLOAD";
+ case RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED: return "RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED";
+ case RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD: return "RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD";
+ case RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD: return "RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD";
+ case RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID:
+ return "RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID";
default: return NULL;
}
rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
const struct flag_mask rx_flags[] = {
- { PKT_RX_VLAN, PKT_RX_VLAN, NULL },
- { PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, NULL },
- { PKT_RX_FDIR, PKT_RX_FDIR, NULL },
- { PKT_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_MASK, NULL },
- { PKT_RX_L4_CKSUM_GOOD, PKT_RX_L4_CKSUM_MASK, NULL },
- { PKT_RX_L4_CKSUM_NONE, PKT_RX_L4_CKSUM_MASK, NULL },
- { PKT_RX_L4_CKSUM_UNKNOWN, PKT_RX_L4_CKSUM_MASK,
- "PKT_RX_L4_CKSUM_UNKNOWN" },
- { PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK, NULL },
- { PKT_RX_IP_CKSUM_GOOD, PKT_RX_IP_CKSUM_MASK, NULL },
- { PKT_RX_IP_CKSUM_NONE, PKT_RX_IP_CKSUM_MASK, NULL },
- { PKT_RX_IP_CKSUM_UNKNOWN, PKT_RX_IP_CKSUM_MASK,
- "PKT_RX_IP_CKSUM_UNKNOWN" },
- { PKT_RX_OUTER_IP_CKSUM_BAD, PKT_RX_OUTER_IP_CKSUM_BAD, NULL },
- { PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN_STRIPPED, NULL },
- { PKT_RX_IEEE1588_PTP, PKT_RX_IEEE1588_PTP, NULL },
- { PKT_RX_IEEE1588_TMST, PKT_RX_IEEE1588_TMST, NULL },
- { PKT_RX_FDIR_ID, PKT_RX_FDIR_ID, NULL },
- { PKT_RX_FDIR_FLX, PKT_RX_FDIR_FLX, NULL },
- { PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
- { PKT_RX_LRO, PKT_RX_LRO, NULL },
- { PKT_RX_SEC_OFFLOAD, PKT_RX_SEC_OFFLOAD, NULL },
- { PKT_RX_SEC_OFFLOAD_FAILED, PKT_RX_SEC_OFFLOAD_FAILED, NULL },
- { PKT_RX_QINQ, PKT_RX_QINQ, NULL },
- { PKT_RX_OUTER_L4_CKSUM_BAD, PKT_RX_OUTER_L4_CKSUM_MASK, NULL },
- { PKT_RX_OUTER_L4_CKSUM_GOOD, PKT_RX_OUTER_L4_CKSUM_MASK,
+ { RTE_MBUF_F_RX_VLAN, RTE_MBUF_F_RX_VLAN, NULL },
+ { RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, NULL },
+ { RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR, NULL },
+ { RTE_MBUF_F_RX_L4_CKSUM_BAD, RTE_MBUF_F_RX_L4_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_L4_CKSUM_GOOD, RTE_MBUF_F_RX_L4_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_L4_CKSUM_NONE, RTE_MBUF_F_RX_L4_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN, RTE_MBUF_F_RX_L4_CKSUM_MASK,
+ "RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN" },
+ { RTE_MBUF_F_RX_IP_CKSUM_BAD, RTE_MBUF_F_RX_IP_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_IP_CKSUM_GOOD, RTE_MBUF_F_RX_IP_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_IP_CKSUM_NONE, RTE_MBUF_F_RX_IP_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN, RTE_MBUF_F_RX_IP_CKSUM_MASK,
+ "RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN" },
+ { RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, NULL },
+ { RTE_MBUF_F_RX_VLAN_STRIPPED, RTE_MBUF_F_RX_VLAN_STRIPPED, NULL },
+ { RTE_MBUF_F_RX_IEEE1588_PTP, RTE_MBUF_F_RX_IEEE1588_PTP, NULL },
+ { RTE_MBUF_F_RX_IEEE1588_TMST, RTE_MBUF_F_RX_IEEE1588_TMST, NULL },
+ { RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID, NULL },
+ { RTE_MBUF_F_RX_FDIR_FLX, RTE_MBUF_F_RX_FDIR_FLX, NULL },
+ { RTE_MBUF_F_RX_QINQ_STRIPPED, RTE_MBUF_F_RX_QINQ_STRIPPED, NULL },
+ { RTE_MBUF_F_RX_LRO, RTE_MBUF_F_RX_LRO, NULL },
+ { RTE_MBUF_F_RX_SEC_OFFLOAD, RTE_MBUF_F_RX_SEC_OFFLOAD, NULL },
+ { RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED, RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED, NULL },
+ { RTE_MBUF_F_RX_QINQ, RTE_MBUF_F_RX_QINQ, NULL },
+ { RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK, NULL },
+ { RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK,
NULL },
- { PKT_RX_OUTER_L4_CKSUM_INVALID, PKT_RX_OUTER_L4_CKSUM_MASK,
+ { RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK,
NULL },
- { PKT_RX_OUTER_L4_CKSUM_UNKNOWN, PKT_RX_OUTER_L4_CKSUM_MASK,
- "PKT_RX_OUTER_L4_CKSUM_UNKNOWN" },
+ { RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN, RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK,
+ "RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN" },
};
const char *name;
unsigned int i;
const char *rte_get_tx_ol_flag_name(uint64_t mask)
{
switch (mask) {
- case PKT_TX_VLAN: return "PKT_TX_VLAN";
- case PKT_TX_IP_CKSUM: return "PKT_TX_IP_CKSUM";
- case PKT_TX_TCP_CKSUM: return "PKT_TX_TCP_CKSUM";
- case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
- case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
- case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
- case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
- case PKT_TX_IPV4: return "PKT_TX_IPV4";
- case PKT_TX_IPV6: return "PKT_TX_IPV6";
- case PKT_TX_OUTER_IP_CKSUM: return "PKT_TX_OUTER_IP_CKSUM";
- case PKT_TX_OUTER_IPV4: return "PKT_TX_OUTER_IPV4";
- case PKT_TX_OUTER_IPV6: return "PKT_TX_OUTER_IPV6";
- case PKT_TX_TUNNEL_VXLAN: return "PKT_TX_TUNNEL_VXLAN";
- case PKT_TX_TUNNEL_GTP: return "PKT_TX_TUNNEL_GTP";
- case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
- case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
- case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
- case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
- case PKT_TX_TUNNEL_VXLAN_GPE: return "PKT_TX_TUNNEL_VXLAN_GPE";
- case PKT_TX_TUNNEL_IP: return "PKT_TX_TUNNEL_IP";
- case PKT_TX_TUNNEL_UDP: return "PKT_TX_TUNNEL_UDP";
- case PKT_TX_QINQ: return "PKT_TX_QINQ";
- case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
- case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
- case PKT_TX_UDP_SEG: return "PKT_TX_UDP_SEG";
- case PKT_TX_OUTER_UDP_CKSUM: return "PKT_TX_OUTER_UDP_CKSUM";
+ case RTE_MBUF_F_TX_VLAN: return "RTE_MBUF_F_TX_VLAN";
+ case RTE_MBUF_F_TX_IP_CKSUM: return "RTE_MBUF_F_TX_IP_CKSUM";
+ case RTE_MBUF_F_TX_TCP_CKSUM: return "RTE_MBUF_F_TX_TCP_CKSUM";
+ case RTE_MBUF_F_TX_SCTP_CKSUM: return "RTE_MBUF_F_TX_SCTP_CKSUM";
+ case RTE_MBUF_F_TX_UDP_CKSUM: return "RTE_MBUF_F_TX_UDP_CKSUM";
+ case RTE_MBUF_F_TX_IEEE1588_TMST: return "RTE_MBUF_F_TX_IEEE1588_TMST";
+ case RTE_MBUF_F_TX_TCP_SEG: return "RTE_MBUF_F_TX_TCP_SEG";
+ case RTE_MBUF_F_TX_IPV4: return "RTE_MBUF_F_TX_IPV4";
+ case RTE_MBUF_F_TX_IPV6: return "RTE_MBUF_F_TX_IPV6";
+ case RTE_MBUF_F_TX_OUTER_IP_CKSUM: return "RTE_MBUF_F_TX_OUTER_IP_CKSUM";
+ case RTE_MBUF_F_TX_OUTER_IPV4: return "RTE_MBUF_F_TX_OUTER_IPV4";
+ case RTE_MBUF_F_TX_OUTER_IPV6: return "RTE_MBUF_F_TX_OUTER_IPV6";
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN: return "RTE_MBUF_F_TX_TUNNEL_VXLAN";
+ case RTE_MBUF_F_TX_TUNNEL_GTP: return "RTE_MBUF_F_TX_TUNNEL_GTP";
+ case RTE_MBUF_F_TX_TUNNEL_GRE: return "RTE_MBUF_F_TX_TUNNEL_GRE";
+ case RTE_MBUF_F_TX_TUNNEL_IPIP: return "RTE_MBUF_F_TX_TUNNEL_IPIP";
+ case RTE_MBUF_F_TX_TUNNEL_GENEVE: return "RTE_MBUF_F_TX_TUNNEL_GENEVE";
+ case RTE_MBUF_F_TX_TUNNEL_MPLSINUDP: return "RTE_MBUF_F_TX_TUNNEL_MPLSINUDP";
+ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: return "RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE";
+ case RTE_MBUF_F_TX_TUNNEL_IP: return "RTE_MBUF_F_TX_TUNNEL_IP";
+ case RTE_MBUF_F_TX_TUNNEL_UDP: return "RTE_MBUF_F_TX_TUNNEL_UDP";
+ case RTE_MBUF_F_TX_QINQ: return "RTE_MBUF_F_TX_QINQ";
+ case RTE_MBUF_F_TX_MACSEC: return "RTE_MBUF_F_TX_MACSEC";
+ case RTE_MBUF_F_TX_SEC_OFFLOAD: return "RTE_MBUF_F_TX_SEC_OFFLOAD";
+ case RTE_MBUF_F_TX_UDP_SEG: return "RTE_MBUF_F_TX_UDP_SEG";
+ case RTE_MBUF_F_TX_OUTER_UDP_CKSUM: return "RTE_MBUF_F_TX_OUTER_UDP_CKSUM";
default: return NULL;
}
}
rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
const struct flag_mask tx_flags[] = {
- { PKT_TX_VLAN, PKT_TX_VLAN, NULL },
- { PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM, NULL },
- { PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK, NULL },
- { PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK, NULL },
- { PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK, NULL },
- { PKT_TX_L4_NO_CKSUM, PKT_TX_L4_MASK, "PKT_TX_L4_NO_CKSUM" },
- { PKT_TX_IEEE1588_TMST, PKT_TX_IEEE1588_TMST, NULL },
- { PKT_TX_TCP_SEG, PKT_TX_TCP_SEG, NULL },
- { PKT_TX_IPV4, PKT_TX_IPV4, NULL },
- { PKT_TX_IPV6, PKT_TX_IPV6, NULL },
- { PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM, NULL },
- { PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4, NULL },
- { PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6, NULL },
- { PKT_TX_TUNNEL_VXLAN, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_GTP, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_GRE, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_IPIP, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_VXLAN_GPE, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_IP, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_TUNNEL_UDP, PKT_TX_TUNNEL_MASK, NULL },
- { PKT_TX_QINQ, PKT_TX_QINQ, NULL },
- { PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
- { PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
- { PKT_TX_UDP_SEG, PKT_TX_UDP_SEG, NULL },
- { PKT_TX_OUTER_UDP_CKSUM, PKT_TX_OUTER_UDP_CKSUM, NULL },
+ { RTE_MBUF_F_TX_VLAN, RTE_MBUF_F_TX_VLAN, NULL },
+ { RTE_MBUF_F_TX_IP_CKSUM, RTE_MBUF_F_TX_IP_CKSUM, NULL },
+ { RTE_MBUF_F_TX_TCP_CKSUM, RTE_MBUF_F_TX_L4_MASK, NULL },
+ { RTE_MBUF_F_TX_SCTP_CKSUM, RTE_MBUF_F_TX_L4_MASK, NULL },
+ { RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_L4_MASK, NULL },
+ { RTE_MBUF_F_TX_L4_NO_CKSUM, RTE_MBUF_F_TX_L4_MASK, "RTE_MBUF_F_TX_L4_NO_CKSUM" },
+ { RTE_MBUF_F_TX_IEEE1588_TMST, RTE_MBUF_F_TX_IEEE1588_TMST, NULL },
+ { RTE_MBUF_F_TX_TCP_SEG, RTE_MBUF_F_TX_TCP_SEG, NULL },
+ { RTE_MBUF_F_TX_IPV4, RTE_MBUF_F_TX_IPV4, NULL },
+ { RTE_MBUF_F_TX_IPV6, RTE_MBUF_F_TX_IPV6, NULL },
+ { RTE_MBUF_F_TX_OUTER_IP_CKSUM, RTE_MBUF_F_TX_OUTER_IP_CKSUM, NULL },
+ { RTE_MBUF_F_TX_OUTER_IPV4, RTE_MBUF_F_TX_OUTER_IPV4, NULL },
+ { RTE_MBUF_F_TX_OUTER_IPV6, RTE_MBUF_F_TX_OUTER_IPV6, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_VXLAN, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_GTP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_GRE, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_IPIP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_GENEVE, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_MPLSINUDP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_IP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_TUNNEL_UDP, RTE_MBUF_F_TX_TUNNEL_MASK, NULL },
+ { RTE_MBUF_F_TX_QINQ, RTE_MBUF_F_TX_QINQ, NULL },
+ { RTE_MBUF_F_TX_MACSEC, RTE_MBUF_F_TX_MACSEC, NULL },
+ { RTE_MBUF_F_TX_SEC_OFFLOAD, RTE_MBUF_F_TX_SEC_OFFLOAD, NULL },
+ { RTE_MBUF_F_TX_UDP_SEG, RTE_MBUF_F_TX_UDP_SEG, NULL },
+ { RTE_MBUF_F_TX_OUTER_UDP_CKSUM, RTE_MBUF_F_TX_OUTER_UDP_CKSUM, NULL },
};
const char *name;
unsigned int i;
* @param mask
* The mask describing the flag. Usually only one bit must be set.
* Several bits can be given if they belong to the same mask.
- * Ex: PKT_TX_L4_MASK.
+ * Ex: RTE_MBUF_F_TX_L4_MASK.
* @return
* The name of this flag, or NULL if it's not a valid TX flag.
*/
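As a usage sketch for these helpers (the buffer size is an arbitrary illustrative choice):

char buf[256];
uint64_t l4 = m->ol_flags & RTE_MBUF_F_TX_L4_MASK;

if (rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf)) == 0)
	printf("tx ol_flags: %s\n", buf);
if (l4 != 0)	/* rte_get_tx_ol_flag_name() returns NULL for a zero mask */
	printf("l4 request: %s\n", rte_get_tx_ol_flag_name(l4));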
m->nb_segs = 1;
m->port = RTE_MBUF_PORT_INVALID;
- m->ol_flags &= EXT_ATTACHED_MBUF;
+ m->ol_flags &= RTE_MBUF_F_EXTERNAL;
m->packet_type = 0;
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
m->data_off = 0;
- m->ol_flags |= EXT_ATTACHED_MBUF;
+ m->ol_flags |= RTE_MBUF_F_EXTERNAL;
m->shinfo = shinfo;
}
/* if m is not direct, get the mbuf that embeds the data */
rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
mi->priv_size = m->priv_size;
- mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
+ mi->ol_flags = m->ol_flags | RTE_MBUF_F_INDIRECT;
}
__rte_pktmbuf_copy_hdr(mi, m);
struct rte_mbuf_ext_shared_info *shinfo;
/* Clear flags, mbuf is being freed. */
- m->ol_flags = EXT_ATTACHED_MBUF;
+ m->ol_flags = RTE_MBUF_F_EXTERNAL;
shinfo = m->shinfo;
/* Optimize for performance - do not dec/reinit */
uint64_t ol_flags = m->ol_flags;
/* Does packet set any of available offloads? */
- if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
+ if (!(ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK))
return 0;
/* IP checksum can be counted only for IPv4 packet */
- if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
+ if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && (ol_flags & RTE_MBUF_F_TX_IPV6))
return -EINVAL;
/* IP type not set when required */
- if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
- if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
+ if (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG))
+ if (!(ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)))
return -EINVAL;
/* Check requirements for TSO packet */
- if (ol_flags & PKT_TX_TCP_SEG)
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
if ((m->tso_segsz == 0) ||
- ((ol_flags & PKT_TX_IPV4) &&
- !(ol_flags & PKT_TX_IP_CKSUM)))
+ ((ol_flags & RTE_MBUF_F_TX_IPV4) &&
+ !(ol_flags & RTE_MBUF_F_TX_IP_CKSUM)))
return -EINVAL;
- /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
- if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
- !(ol_flags & PKT_TX_OUTER_IPV4))
+ /* RTE_MBUF_F_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
+ if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) &&
+ !(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4))
return -EINVAL;
return 0;
/**
* The RX packet is an 802.1q VLAN packet, and the tci has been
* saved in mbuf->vlan_tci.
- * If the flag PKT_RX_VLAN_STRIPPED is also present, the VLAN
+ * If the flag RTE_MBUF_F_RX_VLAN_STRIPPED is also present, the VLAN
* header has been stripped from mbuf data, else it is still
* present.
*/
-#define PKT_RX_VLAN (1ULL << 0)
+#define RTE_MBUF_F_RX_VLAN (1ULL << 0)
/** RX packet with RSS hash result. */
-#define PKT_RX_RSS_HASH (1ULL << 1)
+#define RTE_MBUF_F_RX_RSS_HASH (1ULL << 1)
/** RX packet with FDIR match indicate. */
-#define PKT_RX_FDIR (1ULL << 2)
+#define RTE_MBUF_F_RX_FDIR (1ULL << 2)
/**
* Deprecated.
* Checking this flag alone is deprecated: check the 2 bits of
- * PKT_RX_L4_CKSUM_MASK.
+ * RTE_MBUF_F_RX_L4_CKSUM_MASK.
* This flag was set when the L4 checksum of a packet was detected as
* wrong by the hardware.
*/
-#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD (1ULL << 3)
/**
* Deprecated.
* Checking this flag alone is deprecated: check the 2 bits of
- * PKT_RX_IP_CKSUM_MASK.
+ * RTE_MBUF_F_RX_IP_CKSUM_MASK.
* This flag was set when the IP checksum of a packet was detected as
* wrong by the hardware.
*/
-#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD (1ULL << 4)
/**
* This flag is set when the outermost IP header checksum is detected as
* wrong by the hardware.
*/
-#define PKT_RX_OUTER_IP_CKSUM_BAD (1ULL << 5)
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD (1ULL << 5)
/**
* Deprecated.
- * This flag has been renamed, use PKT_RX_OUTER_IP_CKSUM_BAD instead.
+ * This flag has been renamed, use RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD instead.
*/
-#define PKT_RX_EIP_CKSUM_BAD \
- RTE_DEPRECATED(PKT_RX_EIP_CKSUM_BAD) PKT_RX_OUTER_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_EIP_CKSUM_BAD \
+ RTE_DEPRECATED(RTE_MBUF_F_RX_EIP_CKSUM_BAD) RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD
/**
* A vlan has been stripped by the hardware and its tci is saved in
* mbuf->vlan_tci. This can only happen if vlan stripping is enabled
* in the RX configuration of the PMD.
- * When PKT_RX_VLAN_STRIPPED is set, PKT_RX_VLAN must also be set.
+ * When RTE_MBUF_F_RX_VLAN_STRIPPED is set, RTE_MBUF_F_RX_VLAN must also be set.
*/
-#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
+#define RTE_MBUF_F_RX_VLAN_STRIPPED (1ULL << 6)
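A minimal Rx-side sketch of these semantics; m is an assumed received mbuf and the two handle_*() callees are hypothetical:

if (m->ol_flags & RTE_MBUF_F_RX_VLAN) {
	uint16_t tci = m->vlan_tci;	/* valid whenever RX_VLAN is set */

	if (m->ol_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
		handle_untagged_data(m, tci);	/* tag already removed from data */
	else
		handle_tagged_data(m, tci);	/* tag still in packet data */
}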
/**
* Mask of bits used to determine the status of RX IP checksum.
- * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
- * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
- * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
- * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
+ * - RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
+ * - RTE_MBUF_F_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
+ * - RTE_MBUF_F_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
+ * - RTE_MBUF_F_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
* data, but the integrity of the IP header is verified.
*/
-#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
-#define PKT_RX_IP_CKSUM_UNKNOWN 0
-#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
-#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
-#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN 0
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD (1ULL << 4)
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD (1ULL << 7)
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
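Because the status is a 2-bit field, it must be compared after masking rather than tested bit-by-bit; a sketch (the counter is hypothetical):

switch (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) {
case RTE_MBUF_F_RX_IP_CKSUM_GOOD:
	break;			/* verified by hardware */
case RTE_MBUF_F_RX_IP_CKSUM_BAD:
	bad_ip_csum++;		/* count or drop the corrupted packet */
	break;
default:
	/* UNKNOWN or NONE: verify in software if it matters */
	break;
}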
/**
* Mask of bits used to determine the status of RX L4 checksum.
- * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
- * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
- * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
- * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
+ * - RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
+ * - RTE_MBUF_F_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
+ * - RTE_MBUF_F_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
+ * - RTE_MBUF_F_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
* data, but the integrity of the L4 data is verified.
*/
-#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
-#define PKT_RX_L4_CKSUM_UNKNOWN 0
-#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
-#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
-#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN 0
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD (1ULL << 3)
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD (1ULL << 8)
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
/** RX IEEE1588 L2 Ethernet PT Packet. */
-#define PKT_RX_IEEE1588_PTP (1ULL << 9)
+#define RTE_MBUF_F_RX_IEEE1588_PTP (1ULL << 9)
/** RX IEEE1588 L2/L4 timestamped packet.*/
-#define PKT_RX_IEEE1588_TMST (1ULL << 10)
+#define RTE_MBUF_F_RX_IEEE1588_TMST (1ULL << 10)
/** FD id reported if FDIR match. */
-#define PKT_RX_FDIR_ID (1ULL << 13)
+#define RTE_MBUF_F_RX_FDIR_ID (1ULL << 13)
/** Flexible bytes reported if FDIR match. */
-#define PKT_RX_FDIR_FLX (1ULL << 14)
+#define RTE_MBUF_F_RX_FDIR_FLX (1ULL << 14)
/**
* The outer VLAN has been stripped by the hardware and its TCI is
* saved in mbuf->vlan_tci_outer.
* This can only happen if VLAN stripping is enabled in the Rx
* configuration of the PMD.
- * When PKT_RX_QINQ_STRIPPED is set, the flags PKT_RX_VLAN and PKT_RX_QINQ
+ * When RTE_MBUF_F_RX_QINQ_STRIPPED is set, the flags RTE_MBUF_F_RX_VLAN and RTE_MBUF_F_RX_QINQ
* must also be set.
*
- * - If both PKT_RX_QINQ_STRIPPED and PKT_RX_VLAN_STRIPPED are set, the 2 VLANs
+ * - If both RTE_MBUF_F_RX_QINQ_STRIPPED and RTE_MBUF_F_RX_VLAN_STRIPPED are set, the 2 VLANs
* have been stripped by the hardware and their TCIs are saved in
* mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
- * - If PKT_RX_QINQ_STRIPPED is set and PKT_RX_VLAN_STRIPPED is unset, only the
+ * - If RTE_MBUF_F_RX_QINQ_STRIPPED is set and RTE_MBUF_F_RX_VLAN_STRIPPED is unset, only the
* outer VLAN is removed from packet data, but both tci are saved in
* mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
*/
-#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
+#define RTE_MBUF_F_RX_QINQ_STRIPPED (1ULL << 15)
/**
* When packets are coalesced by a hardware or virtual driver, this flag
* can be set in the RX mbuf, meaning that the m->tso_segsz field is
* valid and is set to the segment size of original packets.
*/
-#define PKT_RX_LRO (1ULL << 16)
+#define RTE_MBUF_F_RX_LRO (1ULL << 16)
/* There is no flag defined at offset 17. It is free for any future use. */
/**
* Indicate that security offload processing was applied on the RX packet.
*/
-#define PKT_RX_SEC_OFFLOAD (1ULL << 18)
+#define RTE_MBUF_F_RX_SEC_OFFLOAD (1ULL << 18)
/**
* Indicate that security offload processing failed on the RX packet.
*/
-#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
+#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
/**
* The RX packet is a double VLAN, and the outer tci has been
- * saved in mbuf->vlan_tci_outer. If this flag is set, PKT_RX_VLAN
+ * saved in mbuf->vlan_tci_outer. If this flag is set, RTE_MBUF_F_RX_VLAN
* must also be set and the inner tci is saved in mbuf->vlan_tci.
- * If the flag PKT_RX_QINQ_STRIPPED is also present, both VLANs
+ * If the flag RTE_MBUF_F_RX_QINQ_STRIPPED is also present, both VLANs
* headers have been stripped from mbuf data, else they are still
* present.
*/
-#define PKT_RX_QINQ (1ULL << 20)
+#define RTE_MBUF_F_RX_QINQ (1ULL << 20)
/**
* Mask of bits used to determine the status of outer RX L4 checksum.
- * - PKT_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4 checksum
- * - PKT_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet is wrong
- * - PKT_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet is valid
- * - PKT_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state.
+ * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4 checksum
+ * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet is wrong
+ * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet is valid
+ * - RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state.
*
- * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
+ * The detection of RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD shall be based on the given
* HW capability. At minimum, the PMD should support
- * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states
+ * RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN and RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD states
* if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available.
*/
-#define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
-#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
-#define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
-#define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
-#define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN 0
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
-/* add new RX flags here, don't forget to update PKT_FIRST_FREE */
+/* add new RX flags here, don't forget to update RTE_MBUF_F_FIRST_FREE */
-#define PKT_FIRST_FREE (1ULL << 23)
-#define PKT_LAST_FREE (1ULL << 40)
+#define RTE_MBUF_F_FIRST_FREE (1ULL << 23)
+#define RTE_MBUF_F_LAST_FREE (1ULL << 40)
-/* add new TX flags here, don't forget to update PKT_LAST_FREE */
+/* add new TX flags here, don't forget to update RTE_MBUF_F_LAST_FREE */
/**
* Outer UDP checksum offload flag. This flag is used for enabling
* outer UDP checksum in PMD. To use outer UDP checksum, the user needs to
* 1) Enable the following in mbuf,
* a) Fill outer_l2_len and outer_l3_len in mbuf.
- * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
- * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
+ * b) Set the RTE_MBUF_F_TX_OUTER_UDP_CKSUM flag.
+ * c) Set the RTE_MBUF_F_TX_OUTER_IPV4 or RTE_MBUF_F_TX_OUTER_IPV6 flag.
* 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag.
*/
-#define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
+#define RTE_MBUF_F_TX_OUTER_UDP_CKSUM (1ULL << 41)
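A sketch of that checklist for a packet with IPv4 outer headers; the header lengths are illustrative and the port is assumed to advertise DEV_TX_OFFLOAD_OUTER_UDP_CKSUM:

m->outer_l2_len = sizeof(struct rte_ether_hdr);
m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
m->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_UDP_CKSUM;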
/**
* UDP Fragmentation Offload flag. This flag is used for enabling UDP
* fragmentation in SW or in HW. When UFO is used, mbuf->tso_segsz is used
* to store the MSS of UDP fragments.
*/
-#define PKT_TX_UDP_SEG (1ULL << 42)
+#define RTE_MBUF_F_TX_UDP_SEG (1ULL << 42)
/**
* Request security offload processing on the TX packet.
*/
-#define PKT_TX_SEC_OFFLOAD (1ULL << 43)
+#define RTE_MBUF_F_TX_SEC_OFFLOAD (1ULL << 43)
/**
* Offload the MACsec. This flag must be set by the application to enable
* this offload feature for a packet to be transmitted.
*/
-#define PKT_TX_MACSEC (1ULL << 44)
+#define RTE_MBUF_F_TX_MACSEC (1ULL << 44)
/**
* Bits 45:48 used for the tunnel type.
* The tunnel type must be specified for TSO or checksum on the inner part
* of tunnel packets.
- * These flags can be used with PKT_TX_TCP_SEG for TSO, or PKT_TX_xxx_CKSUM.
+ * These flags can be used with RTE_MBUF_F_TX_TCP_SEG for TSO, or RTE_MBUF_F_TX_xxx_CKSUM.
* The mbuf fields for inner and outer header lengths are required:
* outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO.
*/
-#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
-#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
-#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
-#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_VXLAN (0x1ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_GRE (0x2ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_IPIP (0x3ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_GENEVE (0x4ULL << 45)
/** TX packet with MPLS-in-UDP RFC 7510 header. */
-#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
-#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
-#define PKT_TX_TUNNEL_GTP (0x7ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_GTP (0x7ULL << 45)
/**
* Generic IP encapsulated tunnel type, used for TSO and checksum offload.
* It can be used for tunnels which are not standards or listed above.
- * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
- * or PKT_TX_TUNNEL_IPIP if possible.
+ * It is preferred to use specific tunnel flags like RTE_MBUF_F_TX_TUNNEL_GRE
+ * or RTE_MBUF_F_TX_TUNNEL_IPIP if possible.
* The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
* Outer and inner checksums are done according to the existing flags like
- * PKT_TX_xxx_CKSUM.
+ * RTE_MBUF_F_TX_xxx_CKSUM.
* Specific tunnel headers that contain payload length, sequence id
* or checksum are not expected to be updated.
*/
-#define PKT_TX_TUNNEL_IP (0xDULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_IP (0xDULL << 45)
/**
* Generic UDP encapsulated tunnel type, used for TSO and checksum offload.
* UDP tunnel type implies outer IP layer.
* It can be used for tunnels which are not standards or listed above.
- * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
+ * It is preferred to use specific tunnel flags like RTE_MBUF_F_TX_TUNNEL_VXLAN
* if possible.
* The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
* Outer and inner checksums are done according to the existing flags like
- * PKT_TX_xxx_CKSUM.
+ * RTE_MBUF_F_TX_xxx_CKSUM.
* Specific tunnel headers that contain payload length, sequence id
* or checksum are not expected to be updated.
*/
-#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_UDP (0xEULL << 45)
/* add new TX TUNNEL type here */
-#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
+#define RTE_MBUF_F_TX_TUNNEL_MASK (0xFULL << 45)
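Pulling the pieces together, a hedged sketch of inner TCP TSO over VXLAN; all lengths must describe the packet's actual headers, and for VXLAN the l2_len conventionally spans the outer UDP, VXLAN and inner Ethernet headers:

m->outer_l2_len = sizeof(struct rte_ether_hdr);
m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
m->l2_len = sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr) +
	    sizeof(struct rte_ether_hdr);
m->l3_len = sizeof(struct rte_ipv4_hdr);
m->l4_len = sizeof(struct rte_tcp_hdr);	/* no TCP options assumed */
m->tso_segsz = 1400;			/* illustrative MSS */
m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN | RTE_MBUF_F_TX_OUTER_IPV4 |
	       RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
	       RTE_MBUF_F_TX_TCP_SEG;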
/**
* Double VLAN insertion (QinQ) request to the driver; the driver may offload
* the insertion based on device capability.
* mbuf 'vlan_tci' & 'vlan_tci_outer' must be valid when this flag is set.
*/
-#define PKT_TX_QINQ (1ULL << 49)
+#define RTE_MBUF_F_TX_QINQ (1ULL << 49)
/** This old name is deprecated. */
-#define PKT_TX_QINQ_PKT PKT_TX_QINQ
+#define RTE_MBUF_F_TX_QINQ_PKT RTE_MBUF_F_TX_QINQ
/**
* TCP segmentation offload. To enable this offload feature for a
* packet to be transmitted on hardware supporting TSO:
- * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
- * PKT_TX_TCP_CKSUM)
- * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
- * - if it's IPv4, set the PKT_TX_IP_CKSUM flag
+ * - set the RTE_MBUF_F_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
+ * RTE_MBUF_F_TX_TCP_CKSUM)
+ * - set the flag RTE_MBUF_F_TX_IPV4 or RTE_MBUF_F_TX_IPV6
+ * - if it's IPv4, set the RTE_MBUF_F_TX_IP_CKSUM flag
* - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
*/
-#define PKT_TX_TCP_SEG (1ULL << 50)
+#define RTE_MBUF_F_TX_TCP_SEG (1ULL << 50)
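The same checklist as a minimal non-tunneled IPv4 sketch (the MSS value is illustrative):

m->l2_len = sizeof(struct rte_ether_hdr);
m->l3_len = sizeof(struct rte_ipv4_hdr);
m->l4_len = sizeof(struct rte_tcp_hdr);	/* no TCP options assumed */
m->tso_segsz = 1460;
m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
	       RTE_MBUF_F_TX_TCP_SEG;	/* implies TCP checksum */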
/** TX IEEE1588 packet to timestamp. */
-#define PKT_TX_IEEE1588_TMST (1ULL << 51)
+#define RTE_MBUF_F_TX_IEEE1588_TMST (1ULL << 51)
/**
* Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
* 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
* L4 checksum offload, the user needs to:
* - fill l2_len and l3_len in mbuf
- * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
- * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - set the flags RTE_MBUF_F_TX_TCP_CKSUM, RTE_MBUF_F_TX_SCTP_CKSUM or RTE_MBUF_F_TX_UDP_CKSUM
+ * - set the flag RTE_MBUF_F_TX_IPV4 or RTE_MBUF_F_TX_IPV6
*/
-#define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
+#define RTE_MBUF_F_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
/** TCP cksum of TX pkt. computed by NIC. */
-#define PKT_TX_TCP_CKSUM (1ULL << 52)
+#define RTE_MBUF_F_TX_TCP_CKSUM (1ULL << 52)
/** SCTP cksum of TX pkt. computed by NIC. */
-#define PKT_TX_SCTP_CKSUM (2ULL << 52)
+#define RTE_MBUF_F_TX_SCTP_CKSUM (2ULL << 52)
/** UDP cksum of TX pkt. computed by NIC. */
-#define PKT_TX_UDP_CKSUM (3ULL << 52)
+#define RTE_MBUF_F_TX_UDP_CKSUM (3ULL << 52)
/** Mask for L4 cksum offload request. */
-#define PKT_TX_L4_MASK (3ULL << 52)
+#define RTE_MBUF_F_TX_L4_MASK (3ULL << 52)
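A sketch of a hardware UDP checksum request over IPv4; udp_hdr and ipv4_hdr are assumed to point into this mbuf's data, and most PMDs expect the checksum field pre-seeded with the pseudo-header checksum:

m->l2_len = sizeof(struct rte_ether_hdr);
m->l3_len = sizeof(struct rte_ipv4_hdr);
m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM;
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags);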
/**
- * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
+ * Offload the IP checksum in the hardware. The flag RTE_MBUF_F_TX_IPV4 should
* also be set by the application, although a PMD will only check
- * PKT_TX_IP_CKSUM.
+ * RTE_MBUF_F_TX_IP_CKSUM.
* - fill the mbuf offload information: l2_len, l3_len
*/
-#define PKT_TX_IP_CKSUM (1ULL << 54)
+#define RTE_MBUF_F_TX_IP_CKSUM (1ULL << 54)
/**
* Packet is IPv4. This flag must be set when using any offload feature
* (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
* packet. If the packet is a tunneled packet, this flag is related to
* the inner headers.
*/
-#define PKT_TX_IPV4 (1ULL << 55)
+#define RTE_MBUF_F_TX_IPV4 (1ULL << 55)
/**
* Packet is IPv6. This flag must be set when using any offload feature
* (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv6
* packet. If the packet is a tunneled packet, this flag is related to
* the inner headers.
*/
-#define PKT_TX_IPV6 (1ULL << 56)
+#define RTE_MBUF_F_TX_IPV6 (1ULL << 56)
/**
* VLAN tag insertion request to the driver; the driver may offload the
* insertion based on the device capability.
* mbuf 'vlan_tci' field must be valid when this flag is set.
*/
-#define PKT_TX_VLAN (1ULL << 57)
+#define RTE_MBUF_F_TX_VLAN (1ULL << 57)
/* this old name is deprecated */
-#define PKT_TX_VLAN_PKT PKT_TX_VLAN
+#define RTE_MBUF_F_TX_VLAN_PKT RTE_MBUF_F_TX_VLAN
/**
* Offload the IP checksum of an external header in the hardware. The
- * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
- * a PMD will only check PKT_TX_OUTER_IP_CKSUM.
+ * flag RTE_MBUF_F_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check RTE_MBUF_F_TX_OUTER_IP_CKSUM.
* - fill the mbuf offload information: outer_l2_len, outer_l3_len
*/
-#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM (1ULL << 58)
/**
* Packet outer header is IPv4. This flag must be set when using any
* outer offload feature (L3 or L4 checksum) to tell the NIC that the
* outer header of the tunneled packet is an IPv4 packet.
*/
-#define PKT_TX_OUTER_IPV4 (1ULL << 59)
+#define RTE_MBUF_F_TX_OUTER_IPV4 (1ULL << 59)
/**
* Packet outer header is IPv6. This flag must be set when using any
* outer offload feature (L4 checksum) to tell the NIC that the outer
* header of the tunneled packet is an IPv6 packet.
*/
-#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+#define RTE_MBUF_F_TX_OUTER_IPV6 (1ULL << 60)
/**
* Bitmask of all supported packet Tx offload features flags,
* which can be set for packet.
*/
-#define PKT_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_IEEE1588_TMST | \
- PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
- PKT_TX_TUNNEL_MASK | \
- PKT_TX_MACSEC | \
- PKT_TX_SEC_OFFLOAD | \
- PKT_TX_UDP_SEG | \
- PKT_TX_OUTER_UDP_CKSUM)
+#define RTE_MBUF_F_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_VLAN_PKT | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_IEEE1588_TMST | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_QINQ_PKT | \
+ RTE_MBUF_F_TX_TUNNEL_MASK | \
+ RTE_MBUF_F_TX_MACSEC | \
+ RTE_MBUF_F_TX_SEC_OFFLOAD | \
+ RTE_MBUF_F_TX_UDP_SEG | \
+ RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
/**
* Mbuf having an external buffer attached. shinfo in mbuf must be filled.
*/
-#define EXT_ATTACHED_MBUF (1ULL << 61)
+#define RTE_MBUF_F_EXTERNAL (1ULL << 61)
-#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
+#define RTE_MBUF_F_INDIRECT (1ULL << 62) /**< Indirect attached mbuf */
/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
uint16_t data_len; /**< Amount of data in segment buffer. */
- /** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
+ /** VLAN TCI (CPU order), valid if RTE_MBUF_F_RX_VLAN is set. */
uint16_t vlan_tci;
RTE_STD_C11
};
uint32_t hi;
/**< First 4 flexible bytes or FD ID, dependent
- * on PKT_RX_FDIR_* flag in ol_flags.
+ * on RTE_MBUF_F_RX_FDIR_* flag in ol_flags.
*/
} fdir; /**< Filter identifier if FDIR enabled */
struct rte_mbuf_sched sched;
} hash; /**< hash information */
};
- /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
+ /** Outer VLAN TCI (CPU order), valid if RTE_MBUF_F_RX_QINQ is set. */
uint16_t vlan_tci_outer;
uint16_t buf_len; /**< Length of segment buffer. */
* If a mbuf has its data in another mbuf and references it by mbuf
* indirection, this mbuf can be defined as a cloned mbuf.
*/
-#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & RTE_MBUF_F_INDIRECT)
/**
* Returns TRUE if given mbuf has an external buffer, or FALSE otherwise.
*
* External buffer is a user-provided anonymous buffer.
*/
-#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
+#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & RTE_MBUF_F_EXTERNAL)
/**
* Returns TRUE if given mbuf is direct, or FALSE otherwise.
* can be defined as a direct mbuf.
*/
#define RTE_MBUF_DIRECT(mb) \
- (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
+ (!((mb)->ol_flags & (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL)))
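A tiny classification sketch using the three macros above:

static const char *
mbuf_kind(const struct rte_mbuf *m)
{
	if (RTE_MBUF_DIRECT(m))
		return "direct";	/* data embedded in this mbuf */
	if (RTE_MBUF_HAS_EXTBUF(m))
		return "external";	/* user buffer, shinfo is valid */
	return "indirect";		/* RTE_MBUF_CLONED(m): data in another mbuf */
}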
/** Uninitialized or unspecified port. */
#define RTE_MBUF_PORT_INVALID UINT16_MAX
mark_free(dynfield1);
/* init free_flags */
- for (mask = PKT_FIRST_FREE; mask <= PKT_LAST_FREE; mask <<= 1)
+ for (mask = RTE_MBUF_F_FIRST_FREE; mask <= RTE_MBUF_F_LAST_FREE; mask <<= 1)
shm->free_flags |= mask;
process_score();
return -1;
vh = (struct rte_vlan_hdr *)(eh + 1);
- m->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ m->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
/* Copy ether header over rather than moving whole packet */
vh = (struct rte_vlan_hdr *) (nh + 1);
vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
- (*m)->ol_flags &= ~(PKT_RX_VLAN_STRIPPED | PKT_TX_VLAN);
+ (*m)->ol_flags &= ~(RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_TX_VLAN);
- if ((*m)->ol_flags & PKT_TX_TUNNEL_MASK)
+ if ((*m)->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
(*m)->outer_l2_len += sizeof(struct rte_vlan_hdr);
else
(*m)->l2_len += sizeof(struct rte_vlan_hdr);
psd_hdr.dst_addr = ipv4_hdr->dst_addr;
psd_hdr.zero = 0;
psd_hdr.proto = ipv4_hdr->next_proto_id;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
l3_len = rte_be_to_cpu_16(ipv4_hdr->total_length);
} psd_hdr;
psd_hdr.proto = (uint32_t)(ipv6_hdr->proto << 24);
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
psd_hdr.len = ipv6_hdr->payload_len;
* Mainly it is required to avoid fragmented header checks if
* no offloads are requested.
*/
- if (!(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG)))
+ if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)))
return 0;
- if (ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))
+ if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))
inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
/*
inner_l3_offset + m->l3_len + m->l4_len))
return -ENOTSUP;
- if (ol_flags & PKT_TX_IPV4) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
inner_l3_offset);
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
}
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
- if (ol_flags & PKT_TX_IPV4) {
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
m->l3_len);
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
		ol_flags);
udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
ol_flags);
}
- } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM ||
- (ol_flags & PKT_TX_TCP_SEG)) {
- if (ol_flags & PKT_TX_IPV4) {
+ } else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
+ (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV4) {
/* non-TSO tcp or TSO */
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
m->l3_len);
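For context, the application side that this prepare routine expects looks roughly like the following sketch (non-tunneled IPv4/TCP assumed; the helper name is illustrative):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* Illustrative Tx setup: lengths filled in, offload flags raised,
 * IP checksum zeroed, TCP checksum seeded with the pseudo-header sum. */
static void
request_ipv4_tcp_cksum(struct rte_mbuf *m, struct rte_ipv4_hdr *ip,
		struct rte_tcp_hdr *tcp)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = rte_ipv4_hdr_len(ip);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
			RTE_MBUF_F_TX_TCP_CKSUM;
	ip->hdr_checksum = 0;
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}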
struct tag_data *data)
{
mbuf->hash.fdir.hi = data->tag;
- mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
static __rte_always_inline void
mbuf2->hash.fdir.hi = data2->tag;
mbuf3->hash.fdir.hi = data3->tag;
- mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
- mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
- mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
- mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf0->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mbuf1->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mbuf2->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
+ mbuf3->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
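A hypothetical reader of the tag set above; the FDIR_ID flag is checked first because hash.fdir.hi shares storage with other hash metadata:

#include <rte_mbuf.h>

/* Illustrative: recover the tag only when the writer marked it valid. */
static inline int
read_tag(const struct rte_mbuf *m, uint32_t *tag)
{
	if (!(m->ol_flags & RTE_MBUF_F_RX_FDIR_ID))
		return -1;
	*tag = m->hash.fdir.hi;
	return 0;
}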
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+ uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
- if (m_buf->ol_flags & PKT_TX_TCP_SEG)
- csum_l4 |= PKT_TX_TCP_CKSUM;
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
switch (csum_l4) {
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
cksum));
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
dgram_cksum));
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
cksum));
break;
}
/* IP cksum verification cannot be bypassed, then calculate here */
- if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
struct rte_ipv4_hdr *ipv4_hdr;
ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
		m_buf->l2_len);
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
- if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
- if (m_buf->ol_flags & PKT_TX_IPV4)
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
+ m_buf->l4_len;
- } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
+ } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
		m_buf->l4_len;
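The switch above encodes the usual virtio checksum contract: the device checksums from csum_start onward and stores the result at csum_start + csum_offset. A sketch with assumed constants (plain IPv4/TCP, no GSO), using the Linux virtio_net header the vhost library builds on:

#include <stddef.h>
#include <linux/virtio_net.h>
#include <rte_tcp.h>

/* Illustrative: what the translation produces for a mbuf carrying
 * RTE_MBUF_F_TX_TCP_CKSUM with l2_len = 14 and l3_len = 20. */
static void
fill_tcp_csum_hdr(struct virtio_net_hdr *h)
{
	h->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
	h->csum_start = 14 + 20;	/* l2_len + l3_len */
	h->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
	h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
}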
m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
if (data_len < m->l2_len + m->l3_len)
goto error;
- m->ol_flags |= PKT_TX_IPV4;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV4;
*l4_proto = ipv4_hdr->next_proto_id;
break;
case RTE_ETHER_TYPE_IPV6:
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->l2_len);
m->l3_len = sizeof(struct rte_ipv6_hdr);
- m->ol_flags |= PKT_TX_IPV6;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV6;
*l4_proto = ipv6_hdr->proto;
break;
default:
case (offsetof(struct rte_tcp_hdr, cksum)):
if (l4_proto != IPPROTO_TCP)
goto error;
- m->ol_flags |= PKT_TX_TCP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
break;
case (offsetof(struct rte_udp_hdr, dgram_cksum)):
if (l4_proto != IPPROTO_UDP)
goto error;
- m->ol_flags |= PKT_TX_UDP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
break;
case (offsetof(struct rte_sctp_hdr, cksum)):
if (l4_proto != IPPROTO_SCTP)
goto error;
- m->ol_flags |= PKT_TX_SCTP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
break;
default:
goto error;
tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
if (data_len < m->l2_len + m->l3_len + tcp_len)
goto error;
- m->ol_flags |= PKT_TX_TCP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = tcp_len;
break;
case VIRTIO_NET_HDR_GSO_UDP:
if (l4_proto != IPPROTO_UDP)
goto error;
- m->ol_flags |= PKT_TX_UDP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = sizeof(struct rte_udp_hdr);
break;
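After a successful parse, the mbuf carries the same TSO state a host application would have set itself before handing the packet to a Tx burst. An illustrative sanity check (hypothetical helper):

#include <rte_mbuf.h>

/* Illustrative: a guest segmentation request is usable only if the
 * parsed segment size and L4 header length are both non-zero. */
static inline int
tso_request_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0 &&
		m->tso_segsz != 0 && m->l4_len != 0;
}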
return;
}
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
m->packet_type = ptype;
hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
if (hdr->csum_start <= hdrlen && l4_supported != 0) {
- m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
} else {
/* Unknown proto or tunnel, do sw cksum. We can assume
 * the cksum field is in the first segment since the
 * buffers we provided to the host are large enough. */
case VIRTIO_NET_HDR_GSO_TCPV6:
if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
break;
- m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
m->tso_segsz = hdr->gso_size;
break;
case VIRTIO_NET_HDR_GSO_UDP:
if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
break;
- m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
m->tso_segsz = hdr->gso_size;
break;
default: